diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000000..96948b9dbe7
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,5 @@
+[*]
+end_of_line = lf
+
+[caddytest/integration/caddyfile_adapt/*.caddyfiletest]
+indent_style = tab
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
index f42d752c91f..a0717e4b3b9 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,14 +1 @@
-# shell scripts should not use tabs to indent!
-*.bash text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-*.sh text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-
-# files for systemd (shell-similar)
-*.path text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-*.service text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-*.timer text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-
-# go fmt will enforce this, but in case a user has not called "go fmt" allow GIT to catch this:
-*.go text eol=lf core.whitespace whitespace=indent-with-non-tab,trailing-space,tabwidth=4
-
-*.yml text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
-.git* text eol=auto core.whitespace whitespace=trailing-space
+*.go text eol=lf
\ No newline at end of file
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 42c2b589809..7142530e570 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,14 +1,14 @@
Contributing to Caddy
=====================
-Welcome! Thank you for choosing to be a part of our community. Caddy wouldn't be great without your involvement!
+Welcome! Thank you for choosing to be a part of our community. Caddy wouldn't be nearly as excellent without your involvement!
For starters, we invite you to join [the Caddy forum](https://caddy.community) where you can hang out with other Caddy users and developers.
## Common Tasks
- [Contributing code](#contributing-code)
-- [Writing a plugin](#writing-a-plugin)
+- [Writing a Caddy module](#writing-a-caddy-module)
- [Asking or answering questions for help using Caddy](#getting-help-using-caddy)
- [Reporting a bug](#reporting-bugs)
- [Suggesting an enhancement or a new feature](#suggesting-features)
@@ -17,61 +17,73 @@ For starters, we invite you to join [the Caddy forum](https://caddy.community) w
Other menu items:
- [Values](#values)
-- [Responsible Disclosure](#responsible-disclosure)
+- [Coordinated Disclosure](#coordinated-disclosure)
- [Thank You](#thank-you)
### Contributing code
-You can have a direct impact on the project by helping with its code. To contribute code to Caddy, open a [pull request](https://github.com/mholt/caddy/pulls) (PR). If you're new to our community, that's okay: **we gladly welcome pull requests from anyone, regardless of your native language or coding experience.** You can get familiar with Caddy's code base by using [code search at Sourcegraph](https://sourcegraph.com/github.com/mholt/caddy/-/search).
+You can have a huge impact on the project by helping with its code. To contribute code to Caddy, first submit or comment in an issue to discuss your contribution, then open a [pull request](https://github.com/caddyserver/caddy/pulls) (PR). If you're new to our community, that's okay: **we gladly welcome pull requests from anyone, regardless of your native language or coding experience.** You can get familiar with Caddy's code base by using [code search at Sourcegraph](https://sourcegraph.com/github.com/caddyserver/caddy).
-We hold contributions to a high standard for quality :bowtie:, so don't be surprised if we ask for revisions—even if it seems small or insignificant. Please don't take it personally. :wink: If your change is on the right track, we can guide you to make it mergable.
+We hold contributions to a high standard for quality :bowtie:, so don't be surprised if we ask for revisions—even if it seems small or insignificant. Please don't take it personally. :blue_heart: If your change is on the right track, we can guide you to make it mergeable.
Here are some of the expectations we have of contributors:
-- If your change is more than just a minor alteration, **open an issue to propose your change first.** This way we can avoid confusion, coordinate what everyone is working on, and ensure that changes are in-line with the project's goals and the best interests of its users. If there's already an issue about it, comment on the existing issue to claim it.
+- **Open an issue to propose your change first.** This way we can avoid confusion, coordinate what everyone is working on, and ensure that any changes are in-line with the project's goals and the best interests of its users. We can also discuss the best possible implementation. If there's already an issue about it, comment on the existing issue to claim it. A lot of valuable time can be saved by discussing a proposal first.
-- **Keep pull requests small.** Smaller PRs are more likely to be merged because they are easier to review! We might ask you to break up large PRs into smaller ones. [An example of what we DON'T do.](https://twitter.com/iamdevloper/status/397664295875805184)
+- **Keep pull requests small.** Smaller PRs are more likely to be merged because they are easier to review! We might ask you to break up large PRs into smaller ones. [An example of what we want to avoid.](https://twitter.com/iamdevloper/status/397664295875805184)
- **Keep related commits together in a PR.** We do want pull requests to be small, but you should also keep multiple related commits in the same PR if they rely on each other.
-- **Write tests.** Tests are essential! Written properly, they ensure your change works, and that other changes in the future won't break your change. CI checks should pass.
+- **Write tests.** Good, automated tests are very valuable! Written properly, they ensure your change works, and that other changes in the future won't break your change. CI checks should pass.
-- **Benchmarks should be included for optimizations.** Optimizations sometimes make code harder to read or have changes that are less than obvious. They should be proven with benchmarks or profiling.
+- **Benchmarks should be included for optimizations.** Optimizations sometimes make code harder to read or have changes that are less than obvious. They should be proven with benchmarks and profiling.
- **[Squash](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) insignificant commits.** Every commit should be significant. Commits which merely rewrite a comment or fix a typo can be combined into another commit that has more substance. Interactive rebase can do this, or a simpler way is `git reset --soft <diverged-commit>` then `git commit -s`.
-- **Own your contributions.** Caddy is a growing project, and it's much better when individual contributors help maintain their change after it is merged.
+- **Be responsible for and maintain your contributions.** Caddy is a growing project, and it's much better when individual contributors help maintain their change after it is merged.
- **Use comments properly.** We expect good godoc comments for package-level functions, types, and values. Comments are also useful whenever the purpose for a line of code is not obvious.
-We often grant [collaborator status](#collaborator-instructions) to contributors who author one or more significant, high-quality PRs that are merged into the code base!
+- **Pull requests may still get closed.** The longer a PR stays open and idle, the more likely it is to be closed. If we haven't reviewed it in a while, it probably means the change is not a priority. Please don't take this personally, we're trying to balance a lot of tasks! If nobody else has commented or reacted to the PR, it likely means your change is useful only to you. The reality is this happens quite a lot. We don't tend to accept PRs that aren't generally helpful. For these reasons or others, the PR may get closed even after a review. We are not obligated to accept all proposed changes, even if the best justification we can give is something vague like, "It doesn't sit right." Sometimes PRs are just the wrong thing or the wrong time. Because it is open source, you can always build your own modified version of Caddy with a change you need, even if we reject it in the official repo. Plus, because Caddy is extensible, it's possible your feature could make a great plugin instead!
+
+- **You certify that you wrote and comprehend the code you submit.** The Caddy project welcomes original contributions that comply with [our CLA](https://cla-assistant.io/caddyserver/caddy), meaning that authors must be able to certify that they created or have rights to the code they are contributing. In addition, we require that code is not simply copy-pasted from Q/A sites or AI language models without full comprehension and rigorous testing. In other words: contributors are allowed to refer to communities for assistance and use AI tools such as language models for inspiration, but code which originates from or is assisted by these resources MUST be:
+
+ - Licensed for you to freely share
+ - Fully comprehended by you (be able to explain every line of code)
+ - Verified by automated tests when feasible, or thorough manual tests otherwise
+
+ We have found that current language models (LLMs, like ChatGPT) may understand code syntax and even problem spaces to an extent, but often fail in subtle ways to convey true knowledge and produce correct algorithms. Integrated tools such as GitHub Copilot and Sourcegraph Cody may be used for inspiration, but code generated by these tools still needs to meet our criteria for licensing, human comprehension, and testing. These tools may be used to help write code comments and tests as long as you can certify they are accurate and correct. Note that it is often more trouble than it's worth to certify that Copilot (for example) is not giving you code that is possibly plagiarised, unlicensed, or licensed with incompatible terms -- as the Caddy project cannot accept such contributions. If that's too difficult for you (or impossible), then we recommend using these resources only for inspiration and writing your own code. Ultimately, you (the contributor) are responsible for the code you're submitting.
+
+ As a courtesy to reviewers, we kindly ask that you disclose when contributing code that was generated by an AI tool or copied from another website so we can be aware of what to look for in code review.
+
+We often grant [collaborator status](#collaborator-instructions) to contributors who author one or more significant, high-quality PRs that are merged into the code base.
#### HOW TO MAKE A PULL REQUEST TO CADDY
-Contributing to Go projects on GitHub is fun and easy. We recommend the following workflow:
+Contributing to Go projects on GitHub is fun and easy. After you have proposed your change in an issue, we recommend the following workflow:
-1. [Fork this repo](https://github.com/mholt/caddy). This makes a copy of the code you can write to.
+1. [Fork this repo](https://github.com/caddyserver/caddy). This makes a copy of the code you can write to.
-2. If you don't already have this repo (mholt/caddy.git) repo on your computer, get it with `go get github.com/mholt/caddy/caddy`.
+2. If you don't already have this repo (caddyserver/caddy.git) on your computer, clone it down: `git clone https://github.com/caddyserver/caddy.git`
-3. Tell git that it can push the mholt/caddy.git repo to your fork by adding a remote: `git remote add myfork https://github.com/you/caddy.git`
+3. Tell git that it can push the caddyserver/caddy.git repo to your fork by adding a remote: `git remote add myfork https://github.com/<username>/caddy.git`
-4. Make your changes in the mholt/caddy.git repo on your computer.
+4. Make your changes in the caddyserver/caddy.git repo on your computer.
5. Push your changes to your fork: `git push myfork`
-6. [Create a pull request](https://github.com/mholt/caddy/pull/new/master) to merge your changes into mholt/caddy @ master. (Click "compare across forks" and change the head fork.)
+6. [Create a pull request](https://github.com/caddyserver/caddy/pull/new/master) to merge your changes into caddyserver/caddy @ master. (Click "compare across forks" and change the head fork.)
This workflow is nice because you don't have to change import paths. You can get fancier by using different branches if you want.
-### Writing a plugin
+### Writing a Caddy module
-Caddy can do more with plugins! Anyone can write a plugin. Plugins are Go libraries that get compiled into Caddy, extending its feature set. They can add directives to the Caddyfile, change how the Caddyfile is loaded, and even implement new server types (e.g. HTTP, DNS). When it's ready, you can submit your plugin to the Caddy website so others can download it.
+Caddy can do more with modules! Anyone can write one. Caddy modules are Go libraries that get compiled into Caddy, extending its feature set. They can add directives to the Caddyfile, add new configuration adapters, and even implement new server types (e.g. HTTP, DNS).
-[Learn how to write and submit a plugin](https://github.com/mholt/caddy/wiki) on the wiki. You should also share and discuss your plugin idea [on the forums](https://caddy.community) to have people test it out. We don't use the Caddy issue tracker for plugins.
+[Learn how to write a module here](https://caddyserver.com/docs/extending-caddy). You should also share and discuss your module idea [on the forums](https://caddy.community) to have people test it out. We don't use the Caddy issue tracker for third-party modules.
### Getting help using Caddy
@@ -83,35 +95,61 @@ Many people on the forums could benefit from your experience and expertise, too.
### Reporting bugs
-Like every software, Caddy has its flaws. If you find one, [search the issues](https://github.com/mholt/caddy/issues) to see if it has already been reported. If not, [open a new issue](https://github.com/mholt/caddy/issues/new) and describe the bug, and somebody will look into it! (This repository is only for Caddy, not plugins.)
+Like every software, Caddy has its flaws. If you find one, [search the issues](https://github.com/caddyserver/caddy/issues) to see if it has already been reported. If not, [open a new issue](https://github.com/caddyserver/caddy/issues/new) and describe the bug, and somebody will look into it! (This repository is only for Caddy and its standard modules.)
-**You can help stop bugs in their tracks!** Speed up the patch by identifying the bug in the code. This can sometimes be done by adding `fmt.Println()` statements (or similar) in relevant code paths to narrow down where the problem may be. It's a good way to [introduce yourself to the Go language](https://tour.golang.org), too.
+**You can help us fix bugs!** Speed up the patch by identifying the bug in the code. This can sometimes be done by adding `fmt.Println()` statements (or similar) in relevant code paths to narrow down where the problem may be. It's a good way to [introduce yourself to the Go language](https://tour.golang.org), too.
-Please follow the issue template so we have all the needed information. Unredacted—yes, actual values matter. We need to be able to repeat the bug using your instructions. Please simplify the issue as much as possible. The burden is on you to convince us that it is actually a bug in Caddy. This is easiest to do when you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). The more detailed and specific you are, the faster we will be able to help you!
+We may reply with an issue template. Please follow the template so we have all the needed information. Unredacted—yes, actual values matter. We need to be able to repeat the bug using your instructions. Please simplify the issue as much as possible. If you don't, we might close your report. The burden is on you to make it easily reproducible and to convince us that it is actually a bug in Caddy. This is easiest to do when you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). The more detailed and specific you are, the faster we will be able to help you!
We suggest reading [How to Report Bugs Effectively](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html).
Please be kind. :smile: Remember that Caddy comes at no cost to you, and you're getting free support when we fix your issues. If we helped you, please consider helping someone else!
+#### Bug reporting expectations
+
+Maintainers---or more generally, developers---need three things to act on bugs:
+
+1. To agree or be convinced that it's a bug (reporter's responsibility).
+ - A bug is unintentional, undesired, or surprising behavior which violates documentation or relevant spec. It might be either a mistake in the documentation or a bug in the code.
+ - This project usually does not work around bugs in other software, systems, and dependencies; instead, we recommend that those bugs are fixed at their source. This sometimes means we close issues or reject PRs that attempt to fix, workaround, or hide bugs in other projects.
+
+2. To be able to understand what is happening (mostly reporter's responsibility).
+ - If the reporter can provide satisfactory instructions such that a developer can reproduce the bug, the developer will likely be able to understand the bug, write a test case, and implement a fix. This is the least amount of work for everyone and the path to the fastest resolution.
+ - Otherwise, the burden is on the reporter to test possible solutions. This is less preferable because it loosens the feedback loop, slows down debugging efforts, obscures the true nature of the problem from the developers, and is unlikely to result in new test cases.
+
+3. A solution, or ideas toward a solution (mostly maintainer's responsibility).
+ - Sometimes the best solution is a documentation change.
+ - Usually the developers have the best domain knowledge for inventing a solution, but reporters may have ideas or preferences for how they would like the software to work.
+ - Security, correctness, and project goals/vision all take priority over a user's preferences.
+ - It's simply good business to yield a solution that satisfies the users, and it's even better business to leave them impressed.
+
+Thus, at the very least, the reporter is expected to:
+
+1. Convince the reader that it's a bug in Caddy (if it's not obvious).
+2. Reduce the problem down to the minimum specific steps required to reproduce it.
+
+The maintainer is usually able to do the rest; but of course the reporter may invest additional effort to speed up the process.
+
+
### Suggesting features
-First, [search to see if your feature has already been requested](https://github.com/mholt/caddy/issues). If it has, you can add a :+1: reaction to vote for it. If your feature idea is new, open an issue to request the feature. You don't have to follow the bug template for feature requests. Please describe your idea thoroughly so that we know how to implement it! Really vague requests may not be helpful or actionable and without clarification will have to be closed.
+First, [search to see if your feature has already been requested](https://github.com/caddyserver/caddy/issues). If it has, you can add a :+1: reaction to vote for it. If your feature idea is new, open an issue to request the feature. Please describe your idea thoroughly so that we know how to implement it! Really vague requests may not be helpful or actionable and, without clarification, will have to be closed.
-While we really do value your requests and implement many of them, not all features are a good fit for Caddy. Most of those [make good plugins](https://github.com/mholt/caddy/wiki), though, which can be made by anyone! But if a feature is not in the best interest of the Caddy project or its users in general, we may politely decline to implement it into Caddy core.
+While we really do value your requests and implement many of them, not all features are a good fit for Caddy. Most of those [make good modules](#writing-a-caddy-module), which can be made by anyone! But if a feature is not in the best interest of the Caddy project or its users in general, we may politely decline to implement it into Caddy core. Additionally, some features are bad ideas altogether (for either obvious or non-obvious reasons) which may be rejected. We'll try to explain why we reject a feature, but sometimes the best we can do is, "It's not a good fit for the project."
### Improving documentation
-Caddy's documentation is available at [https://caddyserver.com/docs](https://caddyserver.com/docs). If you would like to make a fix to the docs, feel free to contribute at the [caddyserver/website](https://github.com/caddyserver/website) repository!
-
-Note that plugin documentation is not hosted by the Caddy website, other than basic usage examples. They are managed by the individual plugin authors, and you will have to contact them to change their documentation.
+Caddy's documentation is available at [https://caddyserver.com/docs](https://caddyserver.com/docs) and its source is in the [website repo](https://github.com/caddyserver/website). If you would like to make a fix to the docs, please submit an issue there describing the change to make.
+Note that third-party module documentation is not hosted by the Caddy website, other than basic usage examples. They are managed by the individual module authors, and you will have to contact them to change their documentation.
+Our documentation is scoped to the Caddy project only: it is not for describing how other software or systems work, even if they relate to Caddy or web servers. That kind of content [can be found in our community wiki](https://caddy.community/c/wiki/13), however.
## Collaborator Instructions
-Collabators have push rights to the repository. We grant this permission after one or more successful, high-quality PRs are merged! We thank them for their help.The expectations we have of collaborators are:
+Collaborators have push rights to the repository. We grant this permission after one or more successful, high-quality PRs are merged! We thank them for their help. The expectations we have of collaborators are:
- **Help review pull requests.** Be meticulous, but also kind. We love our contributors, but we critique the contribution to make it better. Multiple, thorough reviews make for the best contributions! Here are some questions to consider:
- Can the change be made more elegant?
@@ -128,9 +166,9 @@ Collabators have push rights to the repository. We grant this permission after o
- **Prefer squashed commits over a messy merge.** If there are many little commits, please [squash the commits](https://stackoverflow.com/a/11732910/1048862) so we don't clutter the commit history.
-- **Don't accept new dependencies lightly.** Dependencies can make the world crash and burn, but they are sometimes necessary. Choose carefully. Extremely small dependencies (a few lines of code) can be inlined. The rest may not be needed. For those that are, Caddy vendors all dependencies with the help of [gvt](https://github.com/FiloSottile/gvt). All external dependencies must be vendored, and _Caddy must not export any types defined by those dependencies_. Check this diligently!
+- **Don't accept new dependencies lightly.** Dependencies can make the world crash and burn, but they are sometimes necessary. Choose carefully. Extremely small dependencies (a few lines of code) can be inlined. The rest may not be needed. For those that are, Caddy uses [go modules](https://github.com/golang/go/wiki/Modules). All external dependencies must be installed as modules, and _Caddy must not export any types defined by those dependencies_. Check this diligently!
-- **Be extra careful in some areas of the code.** There are some critical areas in the Caddy code base that we review extra meticulously: the `caddy` and `caddytls` packages especially.
+- **Be extra careful in some areas of the code.** There are some critical areas in the Caddy code base that we review extra meticulously: the `caddyhttp` and `caddytls` packages especially.
- **Make sure tests test the actual thing.** Double-check that the tests fail without the change, and pass with it. It's important that they assert what they're purported to assert.
@@ -142,19 +180,18 @@ Collabators have push rights to the repository. We grant this permission after o
-## Values
+## Values (WIP)
- A person is always more important than code. People don't like being handled "efficiently". But we can still process issues and pull requests efficiently while being kind, patient, and considerate.
- The ends justify the means, if the means are good. A good tree won't produce bad fruit. But if we cut corners or are hasty in our process, the end result will not be good.
-## Responsible Disclosure
+## Security Policy
-If you've found a security vulnerability, please email me, the author, directly: Matthew dot Holt at Gmail. I'll need enough information to verify the bug and make a patch. It will speed things up if you suggest a working patch. If your report is valid and a patch is released, we will not reveal your identity by default. If you wish to be credited, please give me the name to use. Thanks for responsibly helping Caddy—and thousands of websites—be more secure!
+If you think you've found a security vulnerability, please refer to our [Security Policy](https://github.com/caddyserver/caddy/security/policy) document.
## Thank you
-Thanks for your help! Caddy would not be what it is today without your
-contributions.
+Thanks for your help! Caddy would not be what it is today without your contributions.
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000000..4703554bd31
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: [mholt] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index a7963b20c8a..00000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
-### 1. What version of Caddy are you using (`caddy -version`)?
-
-
-### 2. What are you trying to do?
-
-
-### 3. What is your entire Caddyfile?
-```text
-(paste Caddyfile here)
-```
-
-### 4. How did you run Caddy (give the full command and describe the execution environment)?
-
-
-### 5. Please paste any relevant HTTP request(s) here.
-
-
-
-
-### 6. What did you expect to see?
-
-
-### 7. What did you see instead (give full error messages and/or log)?
-
-
-### 8. How can someone who is starting from scratch reproduce the bug as minimally as possible?
-
-
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 3d0eba238b0..00000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-### 1. What does this change do, exactly?
-
-
-### 2. Please link to the relevant issues.
-
-
-### 3. Which documentation changes (if any) need to be made because of this PR?
-
-
-### 4. Checklist
-
-- [ ] I have written tests and verified that they fail without my change
-- [ ] I have squashed any insignificant commits
-- [ ] This change has comments for package types, values, functions, and non-obvious lines of code
-- [ ] I am willing to help maintain this change if there are issues with it later
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
new file mode 100644
index 00000000000..557b4bac111
--- /dev/null
+++ b/.github/SECURITY.md
@@ -0,0 +1,59 @@
+# Security Policy
+
+The Caddy project would like to make sure that it stays on top of all practically-exploitable vulnerabilities.
+
+
+## Supported Versions
+
+| Version | Supported |
+| -------- | ----------|
+| 2.latest | ✔️ |
+| 1.x | :x: |
+| < 1.x | :x: |
+
+
+## Acceptable Scope
+
+A security report must demonstrate a security bug in the source code from this repository.
+
+Some security problems are the result of interplay between different components of the Web, rather than a vulnerability in the web server itself. Please only report vulnerabilities in the web server itself, as we cannot coerce the rest of the Web to be fixed (for example, we do not consider IP spoofing, BGP hijacks, or missing/misconfigured HTTP headers a vulnerability in the Caddy web server).
+
+Vulnerabilities caused by misconfigurations are out of scope. Yes, it is entirely possible to craft and use a configuration that is unsafe, just like with every other web server; we recommend against doing that.
+
+We do not accept reports if the steps imply or require a compromised system or third-party software, as we cannot control those. We expect that users secure their own systems and keep all their software patched. For example, if untrusted users are able to upload/write/host arbitrary files in the web root directory, it is NOT a security bug in Caddy if those files get served to clients; however, it _would_ be a valid report if a bug in Caddy's source code unintentionally gave unauthorized users the ability to upload unsafe files or delete files without relying on an unpatched system or piece of software.
+
+Client-side exploits are out of scope. In other words, it is not a bug in Caddy if the web browser does something unsafe, even if the downloaded content was served by Caddy. (Those kinds of exploits can generally be mitigated by proper configuration of HTTP headers.) As a general rule, the content served by Caddy is not considered in scope because content is configurable by the site owner or the associated web application.
+
+Security bugs in code dependencies (including Go's standard library) are out of scope. Instead, if a dependency has patched a relevant security bug, please feel free to open a public issue or pull request to update that dependency in our code.
+
+
+## Reporting a Vulnerability
+
+We get a lot of difficult reports that turn out to be invalid. Clear, obvious reports tend to be the most credible (but are also rare).
+
+First please ensure your report falls within the accepted scope of security bugs (above).
+
+We'll need enough information to verify the bug and make a patch. To speed things up, please include:
+
+- Most minimal possible config (without redactions!)
+- Command(s)
+- Precise HTTP requests (`curl -v` and its output please)
+- Full log output (please enable debug mode)
+- Specific minimal steps to reproduce the issue from scratch
+- A working patch
+
+Please DO NOT use containers, VMs, cloud instances or services, or any other complex infrastructure in your steps. Always prefer `curl -v` instead of web browsers.
+
+We consider publicly-registered domain names to be public information. This is necessary in order to maintain the integrity of certificate transparency, public DNS, and other public trust systems. Do not redact domain names from your reports. The actual content of your domain name affects Caddy's behavior, so we need the exact domain name(s) to reproduce with, or your report will be ignored.
+
+It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, depending on available resources. Priority will be given to credible, responsible reports that are constructive, specific, and actionable. (We get a lot of invalid reports.) Thank you for understanding.
+
+When you are ready, please email Matt Holt (the author) directly: matt at dyanim dot com.
+
+Please don't encrypt the email body. It only makes the process more complicated.
+
+Please also understand that due to our nature as an open source project, we do not have a budget to award security bounties. We can only thank you.
+
+If your report is valid and a patch is released, we will not reveal your identity by default. If you wish to be credited, please give us the name to use and/or your GitHub username. If you don't provide this we can't credit you.
+
+Thanks for responsibly helping Caddy—and thousands of websites—be more secure!
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000..64284b90748
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+---
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "monthly"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000000..7f8e6d501f5
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,220 @@
+# Used as inspiration: https://github.com/mvdan/github-actions-golang
+
+name: Tests
+
+on:
+ push:
+ branches:
+ - master
+ - 2.*
+ pull_request:
+ branches:
+ - master
+ - 2.*
+
+jobs:
+ test:
+ strategy:
+ # Default is true, cancels jobs for other platforms in the matrix if one fails
+ fail-fast: false
+ matrix:
+ os:
+ - linux
+ - mac
+ - windows
+ go:
+ - '1.23'
+ - '1.24'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.23'
+ GO_SEMVER: '~1.23.6'
+
+ - go: '1.24'
+ GO_SEMVER: '~1.24.0'
+
+ # Set some variables per OS, usable via ${{ matrix.VAR }}
+ # OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories)
+ # CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing
+ # SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True')
+ - os: linux
+ OS_LABEL: ubuntu-latest
+ CADDY_BIN_PATH: ./cmd/caddy/caddy
+ SUCCESS: 0
+
+ - os: mac
+ OS_LABEL: macos-14
+ CADDY_BIN_PATH: ./cmd/caddy/caddy
+ SUCCESS: 0
+
+ - os: windows
+ OS_LABEL: windows-latest
+ CADDY_BIN_PATH: ./cmd/caddy/caddy.exe
+ SUCCESS: 'True'
+
+ runs-on: ${{ matrix.OS_LABEL }}
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
+
+ # These tools would be useful if we later decide to reinvestigate
+ # publishing test/coverage reports to some tool for easier consumption
+ # - name: Install test and coverage analysis tools
+ # run: |
+ # go get github.com/axw/gocov/gocov
+ # go get github.com/AlekSi/gocov-xml
+ # go get -u github.com/jstemmer/go-junit-report
+ # echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+
+ - name: Print Go version and environment
+ id: vars
+ shell: bash
+ run: |
+ printf "Using go at: $(which go)\n"
+ printf "Go version: $(go version)\n"
+ printf "\n\nGo environment:\n\n"
+ go env
+ printf "\n\nSystem environment:\n\n"
+ env
+ printf "Git version: $(git version)\n\n"
+ # Calculate the short SHA1 hash of the git commit
+ echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+
+ - name: Get dependencies
+ run: |
+ go get -v -t -d ./...
+ # mkdir test-results
+
+ - name: Build Caddy
+ working-directory: ./cmd/caddy
+ env:
+ CGO_ENABLED: 0
+ run: |
+ go build -tags nobadger -trimpath -ldflags="-w -s" -v
+
+ - name: Smoke test Caddy
+ working-directory: ./cmd/caddy
+ run: |
+ ./caddy start
+ ./caddy stop
+
+ - name: Publish Build Artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
+ path: ${{ matrix.CADDY_BIN_PATH }}
+ compression-level: 0
+
+ # Commented bits below were useful to allow the job to continue
+ # even if the tests fail, so we can publish the report separately
+ # For info about set-output, see https://stackoverflow.com/questions/57850553/github-actions-check-steps-status
+ - name: Run tests
+ # id: step_test
+ # continue-on-error: true
+ run: |
+ # (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out
+ go test -tags nobadger -v -coverprofile="cover-profile.out" -short -race ./...
+ # echo "status=$?" >> $GITHUB_OUTPUT
+
+ # Relevant step if we reinvestigate publishing test/coverage reports
+ # - name: Prepare coverage reports
+ # run: |
+ # mkdir coverage
+ # gocov convert cover-profile.out > coverage/coverage.json
+ # # Because Windows doesn't work with input redirection like *nix, but output redirection works.
+ # (cat ./coverage/coverage.json | gocov-xml) > coverage/coverage.xml
+
+ # To return the correct result even though we set 'continue-on-error: true'
+ # - name: Coerce correct build result
+ # if: matrix.os != 'windows' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }}
+ # run: |
+ # echo "step_test ${{ steps.step_test.outputs.status }}\n"
+ # exit 1
+
+ s390x-test:
+ name: test (s390x on IBM Z)
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
+ continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ - name: Run Tests
+ run: |
+ set +e
+ mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa
+
+ # short sha is enough?
+ short_sha=$(git rev-parse --short HEAD)
+
+ # To shorten the following lines
+ ssh_opts="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
+ ssh_host="$CI_USER@ci-s390x.caddyserver.com"
+
+          # The environment is fresh, so there's no point in repeatedly accepting and adding the key.
+ rsync -arz -e "ssh $ssh_opts" --progress --delete --exclude '.git' . "$ssh_host":/var/tmp/"$short_sha"
+          ssh $ssh_opts -t "$ssh_host" bash <<EOF
+          cd /var/tmp/$short_sha
+          go version
+          go env
+          printf "\n\n"
+          retries=3
+          exit_code=0
+          while ((retries > 0)); do
+ CGO_ENABLED=0 go test -p 1 -tags nobadger -v ./...
+ exit_code=$?
+ if ((exit_code == 0)); then
+ break
+ fi
+ echo "\n\nTest failed: \$exit_code, retrying..."
+ ((retries--))
+ done
+ echo "Remote exit code: \$exit_code"
+ exit \$exit_code
+ EOF
+ test_result=$?
+
+          # There's no need to leave the files around
+ ssh $ssh_opts "$ssh_host" "rm -rf /var/tmp/'$short_sha'"
+
+ echo "Test exit code: $test_result"
+ exit $test_result
+ env:
+ SSH_KEY: ${{ secrets.S390X_SSH_KEY }}
+ CI_USER: ${{ secrets.CI_USER }}
+
+ goreleaser-check:
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - uses: goreleaser/goreleaser-action@v6
+ with:
+ version: latest
+ args: check
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: "~1.24"
+ check-latest: true
+ - name: Install xcaddy
+ run: |
+ go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
+ xcaddy version
+ - uses: goreleaser/goreleaser-action@v6
+ with:
+ version: latest
+ args: build --single-target --snapshot
+ env:
+ TAG: ${{ github.head_ref || github.ref_name }}
diff --git a/.github/workflows/cross-build.yml b/.github/workflows/cross-build.yml
new file mode 100644
index 00000000000..231e4a6e2cd
--- /dev/null
+++ b/.github/workflows/cross-build.yml
@@ -0,0 +1,73 @@
+name: Cross-Build
+
+on:
+ push:
+ branches:
+ - master
+ - 2.*
+ pull_request:
+ branches:
+ - master
+ - 2.*
+
+jobs:
+ build:
+ strategy:
+ fail-fast: false
+ matrix:
+ goos:
+ - 'aix'
+ - 'linux'
+ - 'solaris'
+ - 'illumos'
+ - 'dragonfly'
+ - 'freebsd'
+ - 'openbsd'
+ - 'windows'
+ - 'darwin'
+ - 'netbsd'
+ go:
+ - '1.23'
+ - '1.24'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.23'
+ GO_SEMVER: '~1.23.6'
+
+ - go: '1.24'
+ GO_SEMVER: '~1.24.0'
+
+ runs-on: ubuntu-latest
+ continue-on-error: true
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
+
+ - name: Print Go version and environment
+ id: vars
+ run: |
+ printf "Using go at: $(which go)\n"
+ printf "Go version: $(go version)\n"
+ printf "\n\nGo environment:\n\n"
+ go env
+ printf "\n\nSystem environment:\n\n"
+ env
+
+ - name: Run Build
+ env:
+ CGO_ENABLED: 0
+ GOOS: ${{ matrix.goos }}
+ GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }}
+ shell: bash
+ continue-on-error: true
+ working-directory: ./cmd/caddy
+ run: |
+ GOOS=$GOOS GOARCH=$GOARCH go build -tags=nobadger,nomysql,nopgx -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000000..3cfe893df8f
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,67 @@
+name: Lint
+
+on:
+ push:
+ branches:
+ - master
+ - 2.*
+ pull_request:
+ branches:
+ - master
+ - 2.*
+
+permissions:
+ contents: read
+
+jobs:
+ # From https://github.com/golangci/golangci-lint-action
+ golangci:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
+ name: lint
+ strategy:
+ matrix:
+ os:
+ - linux
+ - mac
+ - windows
+
+ include:
+ - os: linux
+ OS_LABEL: ubuntu-latest
+
+ - os: mac
+ OS_LABEL: macos-14
+
+ - os: windows
+ OS_LABEL: windows-latest
+
+ runs-on: ${{ matrix.OS_LABEL }}
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '~1.24'
+ check-latest: true
+
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: latest
+
+ # Windows times out frequently after about 5m50s if we don't set a longer timeout.
+ args: --timeout 10m
+
+ # Optional: show only new issues if it's a pull request. The default value is `false`.
+ # only-new-issues: true
+
+ govulncheck:
+ runs-on: ubuntu-latest
+ steps:
+ - name: govulncheck
+ uses: golang/govulncheck-action@v1
+ with:
+ go-version-input: '~1.24.0'
+ check-latest: true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000000..df42679bb40
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,178 @@
+name: Release
+
+on:
+ push:
+ tags:
+ - 'v*.*.*'
+
+jobs:
+ release:
+ name: Release
+ strategy:
+ matrix:
+ os:
+ - ubuntu-latest
+ go:
+ - '1.24'
+
+ include:
+ # Set the minimum Go patch version for the given Go minor
+ # Usable via ${{ matrix.GO_SEMVER }}
+ - go: '1.24'
+ GO_SEMVER: '~1.24.0'
+
+ runs-on: ${{ matrix.os }}
+ # https://github.com/sigstore/cosign/issues/1258#issuecomment-1002251233
+ # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
+ permissions:
+ id-token: write
+ # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps#permission-on-contents
+ # "Releases" is part of `contents`, so it needs the `write`
+ contents: write
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Install Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ matrix.GO_SEMVER }}
+ check-latest: true
+
+ # Force fetch upstream tags -- because 65 minutes
+ # tl;dr: actions/checkout@v4 runs this line:
+ # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
+ # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
+ # git fetch --prune --unshallow
+ # which doesn't overwrite that tag because that would be destructive.
+ # Credit to @francislavoie for the investigation.
+ # https://github.com/actions/checkout/issues/290#issuecomment-680260080
+ - name: Force fetch upstream tags
+ run: git fetch --tags --force
+
+ # https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027
+ - name: Print Go version and environment
+ id: vars
+ run: |
+ printf "Using go at: $(which go)\n"
+ printf "Go version: $(go version)\n"
+ printf "\n\nGo environment:\n\n"
+ go env
+ printf "\n\nSystem environment:\n\n"
+ env
+ echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
+ echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+
+ # Add "pip install" CLI tools to PATH
+ echo ~/.local/bin >> $GITHUB_PATH
+
+ # Parse semver
+ TAG=${GITHUB_REF/refs\/tags\//}
+ SEMVER_RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z\.-]*\)'
+ TAG_MAJOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\1#"`
+ TAG_MINOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\2#"`
+ TAG_PATCH=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\3#"`
+ TAG_SPECIAL=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\4#"`
+ echo "tag_major=${TAG_MAJOR}" >> $GITHUB_OUTPUT
+ echo "tag_minor=${TAG_MINOR}" >> $GITHUB_OUTPUT
+ echo "tag_patch=${TAG_PATCH}" >> $GITHUB_OUTPUT
+ echo "tag_special=${TAG_SPECIAL}" >> $GITHUB_OUTPUT
+
+ # Cloudsmith CLI tooling for pushing releases
+ # See https://help.cloudsmith.io/docs/cli
+ - name: Install Cloudsmith CLI
+ run: pip install --upgrade cloudsmith-cli
+
+ - name: Validate commits and tag signatures
+ run: |
+
+ # Import Matt Holt's key
+ curl 'https://github.com/mholt.gpg' | gpg --import
+
+ echo "Verifying the tag: ${{ steps.vars.outputs.version_tag }}"
+ # tags are only accepted if signed by Matt's key
+ git verify-tag "${{ steps.vars.outputs.version_tag }}" || exit 1
+
+ - name: Install Cosign
+ uses: sigstore/cosign-installer@main
+ - name: Cosign version
+ run: cosign version
+ - name: Install Syft
+ uses: anchore/sbom-action/download-syft@main
+ - name: Syft version
+ run: syft version
+ - name: Install xcaddy
+ run: |
+ go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
+ xcaddy version
+ # GoReleaser will take care of publishing those artifacts into the release
+ - name: Run GoReleaser
+ uses: goreleaser/goreleaser-action@v6
+ with:
+ version: latest
+ args: release --clean --timeout 60m
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ TAG: ${{ steps.vars.outputs.version_tag }}
+ COSIGN_EXPERIMENTAL: 1
+
+ # Only publish on non-special tags (e.g. non-beta)
+ # We will continue to push to Gemfury for the foreseeable future, although
+ # Cloudsmith is probably better, to not break things for existing users of Gemfury.
+ # See https://gemfury.com/caddy/deb:caddy
+ - name: Publish .deb to Gemfury
+ if: ${{ steps.vars.outputs.tag_special == '' }}
+ env:
+ GEMFURY_PUSH_TOKEN: ${{ secrets.GEMFURY_PUSH_TOKEN }}
+ run: |
+ for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
+ curl -F package=@"$filename" https://${GEMFURY_PUSH_TOKEN}:@push.fury.io/caddy/
+ done
+
+ # Publish only special tags (unstable/beta/rc) to the "testing" repo
+ # See https://cloudsmith.io/~caddy/repos/testing/
+ - name: Publish .deb to Cloudsmith (special tags)
+ if: ${{ steps.vars.outputs.tag_special != '' }}
+ env:
+ CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
+ run: |
+ for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
+ echo "Pushing $filename to 'testing'"
+ cloudsmith push deb caddy/testing/any-distro/any-version $filename
+ done
+
+ # Publish stable tags to Cloudsmith to both repos, "stable" and "testing"
+ # See https://cloudsmith.io/~caddy/repos/stable/
+ - name: Publish .deb to Cloudsmith (stable tags)
+ if: ${{ steps.vars.outputs.tag_special == '' }}
+ env:
+ CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
+ run: |
+ for filename in dist/*.deb; do
+ # armv6 and armv7 are both "armhf" so we can skip the duplicate
+ if [[ "$filename" == *"armv6"* ]]; then
+ echo "Skipping $filename"
+ continue
+ fi
+
+ echo "Pushing $filename to 'stable'"
+ cloudsmith push deb caddy/stable/any-distro/any-version $filename
+
+ echo "Pushing $filename to 'testing'"
+ cloudsmith push deb caddy/testing/any-distro/any-version $filename
+ done
diff --git a/.github/workflows/release_published.yml b/.github/workflows/release_published.yml
new file mode 100644
index 00000000000..491dae75db4
--- /dev/null
+++ b/.github/workflows/release_published.yml
@@ -0,0 +1,35 @@
+name: Release Published
+
+# Event payload: https://developer.github.com/webhooks/event-payloads/#release
+on:
+ release:
+ types: [published]
+
+jobs:
+ release:
+ name: Release Published
+ strategy:
+ matrix:
+ os:
+ - ubuntu-latest
+ runs-on: ${{ matrix.os }}
+
+ steps:
+
+ # See https://github.com/peter-evans/repository-dispatch
+ - name: Trigger event on caddyserver/dist
+ uses: peter-evans/repository-dispatch@v3
+ with:
+ token: ${{ secrets.REPO_DISPATCH_TOKEN }}
+ repository: caddyserver/dist
+ event-type: release-tagged
+ client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
+
+ - name: Trigger event on caddyserver/caddy-docker
+ uses: peter-evans/repository-dispatch@v3
+ with:
+ token: ${{ secrets.REPO_DISPATCH_TOKEN }}
+ repository: caddyserver/caddy-docker
+ event-type: release-tagged
+ client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
+
diff --git a/.gitignore b/.gitignore
index 4f3845ed411..381bf74030c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,19 +1,31 @@
-.DS_Store
-Thumbs.db
_gitignore/
-Vagrantfile
-.vagrant/
-/.idea
+*.log
+Caddyfile
+Caddyfile.*
+!caddyfile/
+!caddyfile.go
-dist/builds/
-dist/release/
+# artifacts from pprof tooling
+*.prof
+*.test
-error.log
-access.log
+# build artifacts and helpers
+cmd/caddy/caddy
+cmd/caddy/caddy.exe
+cmd/caddy/tmp/*.exe
+cmd/caddy/.env
-/*.conf
-Caddyfile
+# mac specific
+.DS_Store
+
+# go modules
+vendor
-og_static/
+# goreleaser artifacts
+dist
+caddy-build
+caddy-dist
-.vscode/
\ No newline at end of file
+# IDE files
+.idea/
+.vscode/
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000000..aecff563eed
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,182 @@
+linters-settings:
+ errcheck:
+ exclude-functions:
+ - fmt.*
+ - (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
+ - (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
+ gci:
+ sections:
+ - standard # Standard section: captures all standard packages.
+ - default # Default section: contains all imports that could not be matched to another section type.
+ - prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
+ - prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
+ # Skip generated files.
+ # Default: true
+ skip-generated: true
+ # Enable custom order of sections.
+ # If `true`, make the section order the same as the order of `sections`.
+ # Default: false
+ custom-order: true
+ exhaustive:
+ ignore-enum-types: reflect.Kind|svc.Cmd
+
+linters:
+ disable-all: true
+ enable:
+ - asasalint
+ - asciicheck
+ - bidichk
+ - bodyclose
+ - decorder
+ - dogsled
+ - dupl
+ - dupword
+ - durationcheck
+ - errcheck
+ - errname
+ - exhaustive
+ - gci
+ - gofmt
+ - goimports
+ - gofumpt
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - importas
+ - misspell
+ - prealloc
+ - promlinter
+ - sloglint
+ - sqlclosecheck
+ - staticcheck
+ - tenv
+ - testableexamples
+ - testifylint
+ - tparallel
+ - typecheck
+ - unconvert
+ - unused
+ - wastedassign
+ - whitespace
+ - zerologlint
+ # these are implicitly disabled:
+ # - containedctx
+ # - contextcheck
+ # - cyclop
+ # - depguard
+ # - errchkjson
+ # - errorlint
+ # - exhaustruct
+ # - execinquery
+ # - exhaustruct
+ # - forbidigo
+ # - forcetypeassert
+ # - funlen
+ # - ginkgolinter
+ # - gocheckcompilerdirectives
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - gochecksumtype
+ # - gocognit
+ # - goconst
+ # - gocritic
+ # - gocyclo
+ # - godot
+ # - godox
+ # - goerr113
+ # - goheader
+ # - gomnd
+ # - gomoddirectives
+ # - gomodguard
+ # - goprintffuncname
+ # - gosmopolitan
+ # - grouper
+ # - inamedparam
+ # - interfacebloat
+ # - ireturn
+ # - lll
+ # - loggercheck
+ # - maintidx
+ # - makezero
+ # - mirror
+ # - musttag
+ # - nakedret
+ # - nestif
+ # - nilerr
+ # - nilnil
+ # - nlreturn
+ # - noctx
+ # - nolintlint
+ # - nonamedreturns
+ # - nosprintfhostport
+ # - paralleltest
+ # - perfsprint
+ # - predeclared
+ # - protogetter
+ # - reassign
+ # - revive
+ # - rowserrcheck
+ # - stylecheck
+ # - tagalign
+ # - tagliatelle
+ # - testpackage
+ # - thelper
+ # - unparam
+ # - usestdlibvars
+ # - varnamelen
+ # - wrapcheck
+ # - wsl
+
+run:
+  # default concurrency is the available CPU count.
+ # concurrency: 4 # explicitly omit this value to fully utilize available resources.
+ timeout: 5m
+ issues-exit-code: 1
+ tests: false
+
+# output configuration options
+output:
+ formats:
+ - format: 'colored-line-number'
+ print-issued-lines: true
+ print-linter-name: true
+
+issues:
+ exclude-rules:
+ - text: 'G115' # TODO: Either we should fix the issues or nuke the linter if it's bad
+ linters:
+ - gosec
+ # we aren't calling unknown URL
+ - text: 'G107' # G107: Url provided to HTTP request as taint input
+ linters:
+ - gosec
+ # as a web server that's expected to handle any template, this is totally in the hands of the user.
+ - text: 'G203' # G203: Use of unescaped data in HTML templates
+ linters:
+ - gosec
+ # we're shelling out to known commands, not relying on user-defined input.
+ - text: 'G204' # G204: Audit use of command execution
+ linters:
+ - gosec
+ # the choice of weakrand is deliberate, hence the named import "weakrand"
+ - path: modules/caddyhttp/reverseproxy/selectionpolicies.go
+ text: 'G404' # G404: Insecure random number source (rand)
+ linters:
+ - gosec
+ - path: modules/caddyhttp/reverseproxy/streaming.go
+ text: 'G404' # G404: Insecure random number source (rand)
+ linters:
+ - gosec
+ - path: modules/logging/filters.go
+ linters:
+ - dupl
+ - path: modules/caddyhttp/matchers.go
+ linters:
+ - dupl
+ - path: modules/caddyhttp/vars.go
+ linters:
+ - dupl
+ - path: _test\.go
+ linters:
+ - errcheck
diff --git a/.goreleaser.yml b/.goreleaser.yml
new file mode 100644
index 00000000000..c7ed4b365e4
--- /dev/null
+++ b/.goreleaser.yml
@@ -0,0 +1,214 @@
+version: 2
+
+before:
+ hooks:
+ # The build is done in this particular way to build Caddy in a designated directory named in .gitignore.
+ # This is so we can run goreleaser on tag without Git complaining of being dirty. The main.go in cmd/caddy directory
+ # cannot be built within that directory due to changes necessary for the build causing Git to be dirty, which
+  # subsequently causes goreleaser to refuse running.
+ - rm -rf caddy-build caddy-dist vendor
+ # vendor Caddy deps
+ - go mod vendor
+ - mkdir -p caddy-build
+ - cp cmd/caddy/main.go caddy-build/main.go
+ - /bin/sh -c 'cd ./caddy-build && go mod init caddy'
+ # prepare syso files for windows embedding
+ - /bin/sh -c 'for a in amd64 arm arm64; do XCADDY_SKIP_BUILD=1 GOOS=windows GOARCH=$a xcaddy build {{.Env.TAG}}; done'
+ - /bin/sh -c 'mv /tmp/buildenv_*/*.syso caddy-build'
+ # GoReleaser doesn't seem to offer {{.Tag}} at this stage, so we have to embed it into the env
+ # so we run: TAG=$(git describe --abbrev=0) goreleaser release --rm-dist --skip-publish --skip-validate
+ - go mod edit -require=github.com/caddyserver/caddy/v2@{{.Env.TAG}} ./caddy-build/go.mod
+ # as of Go 1.16, `go` commands no longer automatically change go.{mod,sum}. We now have to explicitly
+ # run `go mod tidy`. The `/bin/sh -c '...'` is because goreleaser can't find cd in PATH without shell invocation.
+ - /bin/sh -c 'cd ./caddy-build && go mod tidy'
+ # vendor the deps of the prepared to-build module
+ - /bin/sh -c 'cd ./caddy-build && go mod vendor'
+ - git clone --depth 1 https://github.com/caddyserver/dist caddy-dist
+ - mkdir -p caddy-dist/man
+ - go mod download
+ - go run cmd/caddy/main.go manpage --directory ./caddy-dist/man
+ - gzip -r ./caddy-dist/man/
+ - /bin/sh -c 'go run cmd/caddy/main.go completion bash > ./caddy-dist/scripts/bash-completion'
+
+builds:
+- env:
+ - CGO_ENABLED=0
+ - GO111MODULE=on
+ dir: ./caddy-build
+ binary: caddy
+ goos:
+ - darwin
+ - linux
+ - windows
+ - freebsd
+ goarch:
+ - amd64
+ - arm
+ - arm64
+ - s390x
+ - ppc64le
+ - riscv64
+ goarm:
+ - "5"
+ - "6"
+ - "7"
+ ignore:
+ - goos: darwin
+ goarch: arm
+ - goos: darwin
+ goarch: ppc64le
+ - goos: darwin
+ goarch: s390x
+ - goos: darwin
+ goarch: riscv64
+ - goos: windows
+ goarch: ppc64le
+ - goos: windows
+ goarch: s390x
+ - goos: windows
+ goarch: riscv64
+ - goos: freebsd
+ goarch: ppc64le
+ - goos: freebsd
+ goarch: s390x
+ - goos: freebsd
+ goarch: riscv64
+ - goos: freebsd
+ goarch: arm
+ goarm: "5"
+ flags:
+ - -trimpath
+ - -mod=readonly
+ ldflags:
+ - -s -w
+ tags:
+ - nobadger
+ - nomysql
+ - nopgx
+
+signs:
+ - cmd: cosign
+ signature: "${artifact}.sig"
+ certificate: '{{ trimsuffix (trimsuffix .Env.artifact ".zip") ".tar.gz" }}.pem'
+ args: ["sign-blob", "--yes", "--output-signature=${signature}", "--output-certificate", "${certificate}", "${artifact}"]
+ artifacts: all
+
+sboms:
+ - artifacts: binary
+ documents:
+ - >-
+ {{ .ProjectName }}_
+ {{- .Version }}_
+ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_
+ {{- .Arch }}
+ {{- with .Arm }}v{{ . }}{{ end }}
+ {{- with .Mips }}_{{ . }}{{ end }}
+ {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}.sbom
+ cmd: syft
+ args: ["$artifact", "--file", "${document}", "--output", "cyclonedx-json"]
+
+archives:
+ - id: default
+ format_overrides:
+ - goos: windows
+ formats: zip
+ name_template: >-
+ {{ .ProjectName }}_
+ {{- .Version }}_
+ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_
+ {{- .Arch }}
+ {{- with .Arm }}v{{ . }}{{ end }}
+ {{- with .Mips }}_{{ . }}{{ end }}
+ {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}
+
+ # package the 'caddy-build' directory into a tarball,
+ # allowing users to build the exact same set of files as ours.
+ - id: source
+ meta: true
+ name_template: "{{ .ProjectName }}_{{ .Version }}_buildable-artifact"
+ files:
+ - src: LICENSE
+ dst: ./LICENSE
+ - src: README.md
+ dst: ./README.md
+ - src: AUTHORS
+ dst: ./AUTHORS
+ - src: ./caddy-build
+ dst: ./
+
+source:
+ enabled: true
+ name_template: '{{ .ProjectName }}_{{ .Version }}_src'
+ format: 'tar.gz'
+
+ # Additional files/template/globs you want to add to the source archive.
+ #
+ # Default: empty.
+ files:
+ - vendor
+
+
+checksum:
+ algorithm: sha512
+
+nfpms:
+ - id: default
+ package_name: caddy
+
+ vendor: Dyanim
+ homepage: https://caddyserver.com
+ maintainer: Matthew Holt
+ description: |
+ Caddy - Powerful, enterprise-ready, open source web server with automatic HTTPS written in Go
+ license: Apache 2.0
+
+ formats:
+ - deb
+ # - rpm
+
+ bindir: /usr/bin
+ contents:
+ - src: ./caddy-dist/init/caddy.service
+ dst: /lib/systemd/system/caddy.service
+
+ - src: ./caddy-dist/init/caddy-api.service
+ dst: /lib/systemd/system/caddy-api.service
+
+ - src: ./caddy-dist/welcome/index.html
+ dst: /usr/share/caddy/index.html
+
+ - src: ./caddy-dist/scripts/bash-completion
+ dst: /etc/bash_completion.d/caddy
+
+ - src: ./caddy-dist/config/Caddyfile
+ dst: /etc/caddy/Caddyfile
+ type: config
+
+ - src: ./caddy-dist/man/*
+ dst: /usr/share/man/man8/
+
+ scripts:
+ postinstall: ./caddy-dist/scripts/postinstall.sh
+ preremove: ./caddy-dist/scripts/preremove.sh
+ postremove: ./caddy-dist/scripts/postremove.sh
+
+ provides:
+ - httpd
+
+release:
+ github:
+ owner: caddyserver
+ name: caddy
+ draft: true
+ prerelease: auto
+
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^chore:'
+ - '^ci:'
+ - '^docs?:'
+ - '^readme:'
+ - '^tests?:'
+ - '^\w+\s+' # a hack to remove commit messages without colons thus don't correspond to a package
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 1eba83c9092..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-language: go
-
-go:
- - 1.8.3
- - tip
-
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-
-before_install:
- # Decrypts a script that installs an authenticated cookie
- # for git to use when cloning from googlesource.com.
- # Bypasses "bandwidth limit exceeded" errors.
- # See github.com/golang/go/issues/12933
- - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then openssl aes-256-cbc -K $encrypted_3df18f9af81d_key -iv $encrypted_3df18f9af81d_iv -in dist/gitcookie.sh.enc -out dist/gitcookie.sh -d; fi
-
-install:
- - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then bash dist/gitcookie.sh; fi
- - go get -t ./...
- - go get github.com/golang/lint/golint
- - go get github.com/FiloSottile/vendorcheck
- # Install gometalinter and certain linters
- - go get github.com/alecthomas/gometalinter
- - go get github.com/client9/misspell/cmd/misspell
- - go get github.com/gordonklaus/ineffassign
- - go get golang.org/x/tools/cmd/goimports
- - go get github.com/tsenart/deadcode
-
-script:
- - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests --vendor ./...
- - vendorcheck ./...
- # TODO: When Go 1.9 is released, replace $(go list) subcommand with ./... because vendor folder should be ignored
- - go test -race $(go list ./... | grep -v vendor)
-
-after_script:
- # TODO: When Go 1.9 is released, replace $(go list) subcommand with ./... because vendor folder should be ignored
- - golint $(go list ./... | grep -v vendor)
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000000..3635dd880d5
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,10 @@
+# This is the official list of Caddy Authors for copyright purposes.
+# Authors may be either individual people or legal entities.
+#
+# Not all individual contributors are authors. For the full list of
+# contributors, refer to the project's page on GitHub or the repo's
+# commit history.
+
+Matthew Holt
+Light Code Labs
+Ardan Labs
diff --git a/vendor/google.golang.org/appengine/LICENSE b/LICENSE
similarity index 100%
rename from vendor/google.golang.org/appengine/LICENSE
rename to LICENSE
diff --git a/LICENSE.txt b/LICENSE.txt
deleted file mode 100644
index 8dada3edaf5..00000000000
--- a/LICENSE.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/README.md b/README.md
index 247e3bad0b2..3f071e6f0bd 100644
--- a/README.md
+++ b/README.md
@@ -1,158 +1,200 @@
----
-
-Caddy is fast, easy to use, and makes you more productive.
-Available for Windows, Mac, Linux, BSD, Solaris, and [Android](https://github.com/mholt/caddy/wiki/Running-Caddy-on-Android).
-## Menu
+### Menu
- [Features](#features)
- [Install](#install)
-- [Quick Start](#quick-start)
-- [Running in Production](#running-in-production)
-- [Contributing](#contributing)
-- [Donors](#donors)
-- [About the Project](#about-the-project)
-
-## Features
-
-- **Easy configuration** with the Caddyfile
-- **Automatic HTTPS** on by default (via [Let's Encrypt](https://letsencrypt.org))
-- **HTTP/2** by default
-- **Virtual hosting** so multiple sites just work
-- Experimental **QUIC support** for those that like speed
-- TLS session ticket **key rotation** for more secure connections
-- **Extensible with plugins** because a convenient web server is a helpful one
-- **Runs anywhere** with **no external dependencies** (not even libc)
+- [Build from source](#build-from-source)
+ - [For development](#for-development)
+ - [With version information and/or plugins](#with-version-information-andor-plugins)
+- [Quick start](#quick-start)
+- [Overview](#overview)
+- [Full documentation](#full-documentation)
+- [Getting help](#getting-help)
+- [About](#about)
+
+
-There's way more, too! [See all features built into Caddy.](https://caddyserver.com/features) On top of all those, Caddy does even more with plugins: choose which plugins you want at [download](https://caddyserver.com/download).
+## [Features](https://caddyserver.com/features)
+
+- **Easy configuration** with the [Caddyfile](https://caddyserver.com/docs/caddyfile)
+- **Powerful configuration** with its [native JSON config](https://caddyserver.com/docs/json/)
+- **Dynamic configuration** with the [JSON API](https://caddyserver.com/docs/api)
+- [**Config adapters**](https://caddyserver.com/docs/config-adapters) if you don't like JSON
+- **Automatic HTTPS** by default
+ - [ZeroSSL](https://zerossl.com) and [Let's Encrypt](https://letsencrypt.org) for public names
+ - Fully-managed local CA for internal names & IPs
+ - Can coordinate with other Caddy instances in a cluster
+ - Multi-issuer fallback
+- **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues
+- **Production-ready** after serving trillions of requests and managing millions of TLS certificates
+- **Scales to hundreds of thousands of sites** as proven in production
+- **HTTP/1.1, HTTP/2, and HTTP/3** all supported by default
+- **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat
+- **Runs anywhere** with **no external dependencies** (not even libc)
+- Written in Go, a language with higher **memory safety guarantees** than other servers
+- Actually **fun to use**
+- So much more to [discover](https://caddyserver.com/features)
## Install
-Caddy binaries have no dependencies and are available for every platform. Get Caddy any one of these ways:
+The simplest, cross-platform way to get started is to download Caddy from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH.
-- **[Download page](https://caddyserver.com/download)** allows you to
-customize your build in the browser
-- **[Latest release](https://github.com/mholt/caddy/releases/latest)** for
-pre-built, vanilla binaries
-- **go get** to build from source: `go get github.com/mholt/caddy/caddy` (requires Go 1.8 or newer)
+See [our online documentation](https://caddyserver.com/docs/install) for other install instructions.
-Then make sure the `caddy` binary is in your PATH.
+## Build from source
+Requirements:
-## Quick Start
+- [Go 1.22.3 or newer](https://golang.org/dl/)
-To serve static files from the current working directory, run:
+### For development
+_**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions in the next section._
+
+```bash
+$ git clone "https://github.com/caddyserver/caddy.git"
+$ cd caddy/cmd/caddy/
+$ go build
```
-caddy
+
+When you run Caddy, it may try to bind to low ports unless otherwise specified in your config. If your OS requires elevated privileges for this, you will need to give your new binary permission to do so. On Linux, this can be done easily with: `sudo setcap cap_net_bind_service=+ep ./caddy`
+
+If you prefer to use `go run` which only creates temporary binaries, you can still do this with the included `setcap.sh` like so:
+
+```bash
+$ go run -exec ./setcap.sh main.go
+```
+
+If you don't want to type your password for `setcap`, use `sudo visudo` to edit your sudoers file and allow your user account to run that command without a password, for example:
+
+```
+username ALL=(ALL:ALL) NOPASSWD: /usr/sbin/setcap
```
-Caddy's default port is 2015, so open your browser to [http://localhost:2015](http://localhost:2015).
+replacing `username` with your actual username. Please be careful and only do this if you know what you are doing! We are only qualified to document how to use Caddy, not Go tooling or your computer, and we are providing these instructions for convenience only; please learn how to use your own computer at your own risk and make any needful adjustments.
-### Go from 0 to HTTPS in 5 seconds
+### With version information and/or plugins
-If the `caddy` binary has permission to bind to low ports and your domain name's DNS records point to the machine you're on:
+Using [our builder tool, `xcaddy`](https://github.com/caddyserver/xcaddy)...
```
-caddy -host example.com
+$ xcaddy build
```
-This command serves static files from the current directory over HTTPS. Certificates are automatically obtained and renewed for you!
+...the following steps are automated:
-### Customizing your site
+1. Create a new folder: `mkdir caddy`
+2. Change into it: `cd caddy`
+3. Copy [Caddy's main.go](https://github.com/caddyserver/caddy/blob/master/cmd/caddy/main.go) into the empty folder. Add imports for any custom plugins you want to add.
+4. Initialize a Go module: `go mod init caddy`
+5. (Optional) Pin Caddy version: `go get github.com/caddyserver/caddy/v2@version` replacing `version` with a git tag, commit, or branch name.
+6. (Optional) Add plugins by adding their import: `_ "import/path/here"`
+7. Compile: `go build -tags=nobadger,nomysql,nopgx`
-To customize how your site is served, create a file named Caddyfile by your site and paste this into it:
-```plain
-localhost
-push
-browse
-websocket /echo cat
-ext .html
-log /var/log/access.log
-proxy /api 127.0.0.1:7005
-header /api Access-Control-Allow-Origin *
-```
-When you run `caddy` in that directory, it will automatically find and use that Caddyfile.
+## Quick start
+
+The [Caddy website](https://caddyserver.com/docs/) has documentation that includes tutorials, quick-start guides, reference, and more.
+
+**We recommend that all users -- regardless of experience level -- do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.**
+
+If you've only got a minute, [the website has several quick-start tutorials](https://caddyserver.com/docs/quick-starts) to choose from! However, after finishing a quick-start tutorial, please read more documentation to understand how the software works. 🙂
+
+
-This simple file enables server push (via Link headers), allows directory browsing (for folders without an index file), hosts a WebSocket echo server at /echo, serves clean URLs, logs requests to an access log, proxies all API requests to a backend on port 7005, and adds the coveted `Access-Control-Allow-Origin: *` header for all responses from the API.
-Wow! Caddy can do a lot with just a few lines.
+## Overview
-### Doing more with Caddy
+Caddy is most often used as an HTTPS server, but it is suitable for any long-running Go program. First and foremost, it is a platform to run Go applications. Caddy "apps" are just Go programs that are implemented as Caddy modules. Two apps -- `tls` and `http` -- ship standard with Caddy.
-To host multiple sites and do more with the Caddyfile, please see the [Caddyfile tutorial](https://caddyserver.com/tutorial/caddyfile).
+Caddy apps instantly benefit from [automated documentation](https://caddyserver.com/docs/json/), graceful on-line [config changes via API](https://caddyserver.com/docs/api), and unification with other Caddy apps.
-Sites with qualifying hostnames are served over [HTTPS by default](https://caddyserver.com/docs/automatic-https).
+Although [JSON](https://caddyserver.com/docs/json/) is Caddy's native config language, Caddy can accept input from [config adapters](https://caddyserver.com/docs/config-adapters) which can essentially convert any config format of your choice into JSON: Caddyfile, JSON 5, YAML, TOML, NGINX config, and more.
-Caddy has a command line interface. Run `caddy -h` to view basic help or see the [CLI documentation](https://caddyserver.com/docs/cli) for details.
+The primary way to configure Caddy is through [its API](https://caddyserver.com/docs/api), but if you prefer config files, the [command-line interface](https://caddyserver.com/docs/command-line) supports those too.
+Caddy exposes an unprecedented level of control compared to any web server in existence. In Caddy, you are usually setting the actual values of the initialized types in memory that power everything from your HTTP handlers and TLS handshakes to your storage medium. Caddy is also ridiculously extensible, with a powerful plugin system that makes vast improvements over other web servers.
-## Running in Production
+To wield the power of this design, you need to know how the config document is structured. Please see [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/).
-Caddy is production-ready if you find it to be a good fit for your site and workflow.
+Nearly all of Caddy's configuration is contained in a single config document, rather than being scattered across CLI flags and env variables and a configuration file as with other web servers. This makes managing your server config more straightforward and reduces hidden variables/factors.
-**Running as root:** We advise against this. You can still listen on ports < 1024 on Linux using setcap like so: `sudo setcap cap_net_bind_service=+ep ./caddy`
-The Caddy project does not officially maintain any system-specific integrations nor suggest how to administer your own system. But your download file includes [unofficial resources](https://github.com/mholt/caddy/tree/master/dist/init) contributed by the community that you may find helpful for running Caddy in production.
+## Full documentation
-How you choose to run Caddy is up to you. Many users are satisfied with `nohup caddy &`. Others use `screen`. Users who need Caddy to come back up after reboots either do so in the script that caused the reboot, add a command to an init script, or configure a service with their OS.
+Our website has complete documentation:
-If you have questions or concerns about Caddy' underlying crypto implementations, consult Go's [crypto packages](https://golang.org/pkg/crypto), starting with their documentation, then issues, then the code itself; as Caddy uses mainly those libraries.
+**https://caddyserver.com/docs/**
+The docs are also open source. You can contribute to them here: https://github.com/caddyserver/website
-## Contributing
-**[Join our forum](https://caddy.community) where you can chat with other Caddy users and developers!** To get familiar with the code base, try [Caddy code search on Sourcegraph](https://sourcegraph.com/github.com/mholt/caddy/-/search)!
-Please see our [contributing guidelines](https://github.com/mholt/caddy/blob/master/.github/CONTRIBUTING.md) for instructions. If you want to write a plugin, check out the [developer wiki](https://github.com/mholt/caddy/wiki).
+## Getting help
-We use GitHub issues and pull requests only for discussing bug reports and the development of specific changes. We welcome all other topics on the [forum](https://caddy.community)!
+- We advise companies using Caddy to secure a support contract through [Ardan Labs](https://www.ardanlabs.com) before help is needed.
-If you want to contribute to the documentation, please submit pull requests to [caddyserver/website](https://github.com/caddyserver/website).
+- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! We can offer private help to sponsors. If Caddy is benefitting your company, please consider a sponsorship. This not only helps fund full-time work to ensure the longevity of the project, but also provides your company with the resources, support, and discounts you need; along with being a great look for your company to your customers and potential customers!
-Thanks for making Caddy -- and the Web -- better!
+- Individuals can exchange help for free on our community forum at https://caddy.community. Remember that people give help out of their spare time and good will. The best way to get help is to give it first!
+Please use our [issue tracker](https://github.com/caddyserver/caddy/issues) only for bug reports and feature requests, i.e. actionable development items (support questions will usually be referred to the forums).
-## Donors
-- [DigitalOcean](https://m.do.co/c/6d7bdafccf96) is hosting the Caddy project.
-- [DNSimple](https://dnsimple.link/resolving-caddy) provides DNS services for Caddy's sites.
-- [DNS Spy](https://dnsspy.io) keeps an eye on Caddy's DNS properties.
-We thank them for their services. **If you want to help keep Caddy free, please [become a sponsor](https://caddyserver.com/pricing)!**
+## About
+Matthew Holt began developing Caddy in 2014 while studying computer science at Brigham Young University. (The name "Caddy" was chosen because this software helps with the tedious, mundane tasks of serving the Web, and is also a single place for multiple things to be organized together.) It soon became the first web server to use HTTPS automatically and by default, and now has hundreds of contributors and has served trillions of HTTPS requests.
-## About the Project
+**The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Stack Holdings GmbH.
-Caddy was born out of the need for a "batteries-included" web server that runs anywhere and doesn't have to take its configuration with it. Caddy took inspiration from [spark](https://github.com/rif/spark), [nginx](https://github.com/nginx/nginx), lighttpd,
-[Websocketd](https://github.com/joewalnes/websocketd) and [Vagrant](https://www.vagrantup.com/), which provides a pleasant mixture of features from each of them.
+- _Project on Twitter: [@caddyserver](https://twitter.com/caddyserver)_
+- _Author on Twitter: [@mholt6](https://twitter.com/mholt6)_
-**The name "Caddy":** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". See [brand guidelines](https://caddyserver.com/brand).
+Caddy is a project of [ZeroSSL](https://zerossl.com), a Stack Holdings company.
-*Author on Twitter: [@mholt6](https://twitter.com/mholt6)*
+Debian package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully hosted, cloud-native, universal package management solution that enables your organization to create, store, and share packages in any format, to any place, with total confidence.
diff --git a/admin.go b/admin.go
new file mode 100644
index 00000000000..6df5a23f7d7
--- /dev/null
+++ b/admin.go
@@ -0,0 +1,1407 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "expvar"
+ "fmt"
+ "hash"
+ "io"
+ "net"
+ "net/http"
+ "net/http/pprof"
+ "net/url"
+ "os"
+ "path"
+ "regexp"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/cespare/xxhash/v2"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// init applies the CADDY_ADMIN environment variable override,
+// if present, before any configuration is loaded.
+func init() {
+	// The hard-coded default `DefaultAdminListen` can be overridden
+	// by setting the `CADDY_ADMIN` environment variable.
+	// The environment variable may be used by packagers to change
+	// the default admin address to something more appropriate for
+	// that platform. See #5317 for discussion.
+	if env, exists := os.LookupEnv("CADDY_ADMIN"); exists {
+		DefaultAdminListen = env
+	}
+}
+
+// AdminConfig configures Caddy's API endpoint, which is used
+// to manage Caddy while it is running.
+type AdminConfig struct {
+	// If true, the admin endpoint will be completely disabled.
+	// Note that this makes any runtime changes to the config
+	// impossible, since the interface to do so is through the
+	// admin endpoint.
+	Disabled bool `json:"disabled,omitempty"`
+
+	// The address to which the admin endpoint's listener should
+	// bind itself. Can be any single network address that can be
+	// parsed by Caddy. Accepts placeholders.
+	// Default: the value of the `CADDY_ADMIN` environment variable,
+	// or `localhost:2019` otherwise.
+	//
+	// Remember: When changing this value through a config reload,
+	// be sure to use the `--address` CLI flag to specify the current
+	// admin address if the currently-running admin endpoint is not
+	// the default address.
+	Listen string `json:"listen,omitempty"`
+
+	// If true, CORS headers will be emitted, and requests to the
+	// API will be rejected if their `Host` and `Origin` headers
+	// do not match the expected value(s). Use `origins` to
+	// customize which origins/hosts are allowed. If `origins` is
+	// not set, the listen address is the only value allowed by
+	// default. Enforced only on local (plaintext) endpoint.
+	EnforceOrigin bool `json:"enforce_origin,omitempty"`
+
+	// The list of allowed origins/hosts for API requests. Only needed
+	// if accessing the admin endpoint from a host different from the
+	// socket's network interface or if `enforce_origin` is true. If not
+	// set, the listener address will be the default value. If set but
+	// empty, no origins will be allowed. Enforced only on local
+	// (plaintext) endpoint.
+	Origins []string `json:"origins,omitempty"`
+
+	// Options pertaining to configuration management.
+	Config *ConfigSettings `json:"config,omitempty"`
+
+	// Options that establish this server's identity. Identity refers to
+	// credentials which can be used to uniquely identify and authenticate
+	// this server instance. This is required if remote administration is
+	// enabled (but does not require remote administration to be enabled).
+	// Default: no identity management.
+	Identity *IdentityConfig `json:"identity,omitempty"`
+
+	// Options pertaining to remote administration. By default, remote
+	// administration is disabled. If enabled, identity management must
+	// also be configured, as that is how the endpoint is secured.
+	// See the neighboring "identity" object.
+	//
+	// EXPERIMENTAL: This feature is subject to change.
+	Remote *RemoteAdmin `json:"remote,omitempty"`
+
+	// Holds onto the routers so that we can later provision them
+	// if they require provisioning. Set to nil once provisioning
+	// completes so the instances can be garbage-collected; see
+	// provisionAdminRouters.
+	routers []AdminRouter
+}
+
+// ConfigSettings configures the management of configuration.
+type ConfigSettings struct {
+	// Whether to keep a copy of the active config on disk. Default is true.
+	// (A nil pointer means the default, i.e. persistence enabled.)
+	// Note that "pulled" dynamic configs (using the neighboring "load" module)
+	// are not persisted; only configs that are pushed to Caddy get persisted.
+	Persist *bool `json:"persist,omitempty"`
+
+	// Loads a new configuration. This is helpful if your configs are
+	// managed elsewhere and you want Caddy to pull its config dynamically
+	// when it starts. The pulled config completely replaces the current
+	// one, just like any other config load. It is an error if a pulled
+	// config is configured to pull another config without a load_delay,
+	// as this creates a tight loop.
+	//
+	// EXPERIMENTAL: Subject to change.
+	LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"`
+
+	// The duration after which to load config. If set, config will be pulled
+	// from the config loader after this duration. A delay is required if a
+	// dynamically-loaded config is configured to load yet another config. To
+	// load configs on a regular interval, ensure this value is set the same
+	// on all loaded configs; it can also be variable if needed, and to stop
+	// the loop, simply remove dynamic config loading from the next-loaded
+	// config.
+	//
+	// EXPERIMENTAL: Subject to change.
+	LoadDelay Duration `json:"load_delay,omitempty"`
+}
+
+// IdentityConfig configures management of this server's identity. An identity
+// consists of credentials that uniquely verify this instance; for example,
+// TLS certificates (public + private key pairs).
+type IdentityConfig struct {
+	// List of names or IP addresses which refer to this server.
+	// Certificates will be obtained for these identifiers so
+	// secure TLS connections can be made using them.
+	Identifiers []string `json:"identifiers,omitempty"`
+
+	// Issuers that can provide this admin endpoint its identity
+	// certificate(s). Default: ACME issuers configured for
+	// ZeroSSL and Let's Encrypt. Be sure to change this if you
+	// require credentials for private identifiers.
+	IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
+
+	// The decoded issuer modules from IssuersRaw; populated
+	// during setup (see manageIdentity).
+	issuers []certmagic.Issuer
+}
+
+// RemoteAdmin enables and configures remote administration. If enabled,
+// a secure listener enforcing mutual TLS authentication will be started
+// on a different port from the standard plaintext admin server.
+//
+// This endpoint is secured using identity management, which must be
+// configured separately (because identity management does not depend
+// on remote administration). See the admin/identity config struct.
+//
+// EXPERIMENTAL: Subject to change.
+type RemoteAdmin struct {
+	// The address on which to start the secure listener. Accepts placeholders.
+	// Default: :2021
+	Listen string `json:"listen,omitempty"`
+
+	// List of access controls for this secure admin endpoint.
+	// This configures TLS mutual authentication (i.e. authorized
+	// client certificates), but also application-layer permissions
+	// like which paths and methods each identity is authorized for.
+	// A client must match at least one entry to be authorized;
+	// see enforceAccessControls.
+	AccessControl []*AdminAccess `json:"access_control,omitempty"`
+}
+
+// AdminAccess specifies what permissions an identity or group
+// of identities are granted.
+type AdminAccess struct {
+	// Base64-encoded DER certificates containing public keys to accept.
+	// (The contents of PEM certificate blocks are base64-encoded DER.)
+	// Any of these public keys can appear in any part of a verified chain.
+	PublicKeys []string `json:"public_keys,omitempty"`
+
+	// Limits what the associated identities are allowed to do.
+	// If unspecified, all permissions are granted.
+	Permissions []AdminPermissions `json:"permissions,omitempty"`
+
+	// The parsed forms of PublicKeys; populated when the remote
+	// admin server is set up (see replaceRemoteAdminServer).
+	publicKeys []crypto.PublicKey
+}
+
+// AdminPermissions specifies what kinds of requests are allowed
+// to be made to the admin endpoint. A nil (omitted) list places
+// no restriction on that dimension; see enforceAccessControls.
+type AdminPermissions struct {
+	// The API paths allowed. Paths are simple prefix matches.
+	// Any subpath of the specified paths will be allowed.
+	Paths []string `json:"paths,omitempty"`
+
+	// The HTTP methods allowed for the given paths.
+	Methods []string `json:"methods,omitempty"`
+}
+
+// newAdminHandler reads admin's config and returns an http.Handler suitable
+// for use in an admin endpoint server, which will be listening on listenAddr.
+// When remote is true, Host/Origin enforcement is skipped and access control
+// is delegated to the RemoteAdmin configuration (mutual TLS) instead.
+func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool, _ Context) adminHandler {
+	muxWrap := adminHandler{mux: http.NewServeMux()}
+
+	// secure the local or remote endpoint respectively
+	if remote {
+		muxWrap.remoteControl = admin.Remote
+	} else {
+		// host checking is disabled on wildcard interfaces
+		// (a warning is logged by replaceLocalAdminServer)
+		muxWrap.enforceHost = !addr.isWildcardInterface()
+		muxWrap.allowedOrigins = admin.allowedOrigins(addr)
+		muxWrap.enforceOrigin = admin.EnforceOrigin
+	}
+
+	// registers h on the mux, wrapped with Prometheus
+	// request-count instrumentation
+	addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) {
+		labels := prometheus.Labels{"path": pattern, "handler": handlerLabel}
+		h = instrumentHandlerCounter(
+			adminMetrics.requestCount.MustCurryWith(labels),
+			h,
+		)
+		muxWrap.mux.Handle(pattern, h)
+	}
+	// addRoute just calls muxWrap.mux.Handle after
+	// wrapping the handler with error handling
+	addRoute := func(pattern string, handlerLabel string, h AdminHandler) {
+		wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			err := h.ServeHTTP(w, r)
+			if err != nil {
+				labels := prometheus.Labels{
+					"path": pattern,
+					"handler": handlerLabel,
+					"method": strings.ToUpper(r.Method),
+				}
+				adminMetrics.requestErrors.With(labels).Inc()
+			}
+			muxWrap.handleError(w, r, err)
+		})
+		addRouteWithMetrics(pattern, handlerLabel, wrapper)
+	}
+
+	const handlerLabel = "admin"
+
+	// register standard config control endpoints
+	addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig))
+	addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID))
+	addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop))
+
+	// register debugging endpoints
+	addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index))
+	addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline))
+	addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile))
+	addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol))
+	addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace))
+	addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler())
+
+	// register third-party module endpoints
+	for _, m := range GetModules("admin.api") {
+		router := m.New().(AdminRouter)
+		for _, route := range router.Routes() {
+			addRoute(route.Pattern, handlerLabel, route.Handler)
+		}
+		// keep the router so it can be provisioned later;
+		// see provisionAdminRouters
+		admin.routers = append(admin.routers, router)
+	}
+
+	return muxWrap
+}
+
+// provisionAdminRouters provisions all the router modules
+// in the admin.api namespace that need provisioning (i.e.
+// those collected by newAdminHandler which implement the
+// Provisioner interface). It returns the first error, if any.
+func (admin *AdminConfig) provisionAdminRouters(ctx Context) error {
+	for _, router := range admin.routers {
+		provisioner, ok := router.(Provisioner)
+		if !ok {
+			// routers without a Provision method need no setup
+			continue
+		}
+
+		err := provisioner.Provision(ctx)
+		if err != nil {
+			return err
+		}
+	}
+
+	// We no longer need the routers once provisioned, allow for GC
+	admin.routers = nil
+
+	return nil
+}
+
+// allowedOrigins returns a list of origins that are allowed.
+// If admin.Origins is nil (null), the provided listen address
+// will be used as the default origin. If admin.Origins is
+// empty, no origins will be allowed, effectively bricking the
+// endpoint for non-unix-socket endpoints, but whatever.
+func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
+	// collect origins in a set to deduplicate them
+	uniqueOrigins := make(map[string]struct{})
+	for _, o := range admin.Origins {
+		uniqueOrigins[o] = struct{}{}
+	}
+	if admin.Origins == nil {
+		if addr.isLoopback() {
+			if addr.IsUnixNetwork() || addr.IsFdNetwork() {
+				// RFC 2616, Section 14.26:
+				// "A client MUST include a Host header field in all HTTP/1.1 request
+				// messages. If the requested URI does not include an Internet host
+				// name for the service being requested, then the Host header field MUST
+				// be given with an empty value."
+				//
+				// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
+				// Understandable, but frustrating. See:
+				// https://github.com/golang/go/issues/60374
+				// See also the discussion here:
+				// https://github.com/golang/go/issues/61431
+				//
+				// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
+				// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
+				// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
+				// security checks, the infosec community assures me that it is secure to do
+				// so, because:
+				// 1) Browsers do not allow access to unix sockets
+				// 2) DNS is irrelevant to unix sockets
+				//
+				// I am not quite ready to trust either of those external factors, so instead
+				// of disabling Host/Origin checks, we now allow specific Host values when
+				// accessing the admin endpoint over unix sockets. I definitely don't trust
+				// DNS (e.g. I don't trust 'localhost' to always resolve to the local host),
+				// and IP shouldn't even be used, but if it is for some reason, I think we can
+				// at least be reasonably assured that 127.0.0.1 and ::1 route to the local
+				// machine, meaning that a hypothetical browser origin would have to be on the
+				// local machine as well.
+				uniqueOrigins[""] = struct{}{}
+				uniqueOrigins["127.0.0.1"] = struct{}{}
+				uniqueOrigins["::1"] = struct{}{}
+			} else {
+				// loopback TCP: allow the usual local host spellings on our port
+				uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
+				uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
+				uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
+			}
+		}
+		if !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
+			uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
+		}
+	}
+	allowed := make([]*url.URL, 0, len(uniqueOrigins))
+	for originStr := range uniqueOrigins {
+		var origin *url.URL
+		if strings.Contains(originStr, "://") {
+			var err error
+			origin, err = url.Parse(originStr)
+			if err != nil {
+				// skip unparseable origins rather than failing
+				continue
+			}
+			// only scheme and host are relevant when comparing
+			// origins (see originAllowed), so strip the rest
+			origin.Path = ""
+			origin.RawPath = ""
+			origin.Fragment = ""
+			origin.RawFragment = ""
+			origin.RawQuery = ""
+		} else {
+			// no scheme given; treat the whole string as a host
+			origin = &url.URL{Host: originStr}
+		}
+		allowed = append(allowed, origin)
+	}
+	return allowed
+}
+
+// replaceLocalAdminServer replaces the running local admin server
+// according to the relevant configuration in cfg. If no configuration
+// for the admin endpoint exists in cfg, a default one is used, so
+// that there is always an admin server (unless it is explicitly
+// configured to be disabled).
+// Critically note that some elements and functionality of the context
+// may not be ready, e.g. storage. Tread carefully.
+func replaceLocalAdminServer(cfg *Config, ctx Context) error {
+	// always* be sure to close down the old admin endpoint
+	// as gracefully as possible, even if the new one is
+	// disabled -- careful to use reference to the current
+	// (old) admin endpoint since it will be different
+	// when the function returns
+	// (* except if the new one fails to start)
+	oldAdminServer := localAdminServer
+	var err error
+	defer func() {
+		// do the shutdown asynchronously so that any
+		// current API request gets a response; this
+		// goroutine may last a few seconds
+		if oldAdminServer != nil && err == nil {
+			go func(oldAdminServer *http.Server) {
+				err := stopAdminServer(oldAdminServer)
+				if err != nil {
+					Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err))
+				}
+			}(oldAdminServer)
+		}
+	}()
+
+	// set a default if admin wasn't otherwise configured
+	if cfg.Admin == nil {
+		cfg.Admin = &AdminConfig{
+			Listen: DefaultAdminListen,
+		}
+	}
+
+	// if new admin endpoint is to be disabled, we're done
+	if cfg.Admin.Disabled {
+		Log().Named("admin").Warn("admin endpoint disabled")
+		return nil
+	}
+
+	// extract a singular listener address
+	addr, err := parseAdminListenAddr(cfg.Admin.Listen, DefaultAdminListen)
+	if err != nil {
+		return err
+	}
+
+	handler := cfg.Admin.newAdminHandler(addr, false, ctx)
+
+	// NOTE(review): context.TODO is used here rather than ctx,
+	// presumably because parts of ctx may not be ready yet (see
+	// the function comment above) — confirm before changing.
+	ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
+	if err != nil {
+		return err
+	}
+
+	serverMu.Lock()
+	localAdminServer = &http.Server{
+		Addr: addr.String(), // for logging purposes only
+		Handler: handler,
+		ReadTimeout: 10 * time.Second,
+		ReadHeaderTimeout: 5 * time.Second,
+		IdleTimeout: 60 * time.Second,
+		MaxHeaderBytes: 1024 * 64,
+	}
+	serverMu.Unlock()
+
+	adminLogger := Log().Named("admin")
+	go func() {
+		// read the package-level server under the mutex to avoid
+		// racing with a subsequent replacement
+		serverMu.Lock()
+		server := localAdminServer
+		serverMu.Unlock()
+		if err := server.Serve(ln.(net.Listener)); !errors.Is(err, http.ErrServerClosed) {
+			adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err))
+		}
+	}()
+
+	adminLogger.Info("admin endpoint started",
+		zap.String("address", addr.String()),
+		zap.Bool("enforce_origin", cfg.Admin.EnforceOrigin),
+		zap.Array("origins", loggableURLArray(handler.allowedOrigins)))
+
+	if !handler.enforceHost {
+		adminLogger.Warn("admin endpoint on open interface; host checking disabled",
+			zap.String("address", addr.String()))
+	}
+
+	return nil
+}
+
+// manageIdentity sets up automated identity management for this server:
+// it loads/provisions the configured issuer modules, rebuilds the
+// identity certificate cache, and kicks off async certificate
+// maintenance for the configured identifiers. It is a no-op if no
+// identity is configured.
+func manageIdentity(ctx Context, cfg *Config) error {
+	if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil {
+		return nil
+	}
+
+	// set default issuers; this is pretty hacky because we can't
+	// import the caddytls package -- but it works
+	if cfg.Admin.Identity.IssuersRaw == nil {
+		cfg.Admin.Identity.IssuersRaw = []json.RawMessage{
+			json.RawMessage(`{"module": "acme"}`),
+		}
+	}
+
+	// load and provision issuer modules
+	// (IssuersRaw is always non-nil at this point since a
+	// default was just set above)
+	if cfg.Admin.Identity.IssuersRaw != nil {
+		val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw")
+		if err != nil {
+			return fmt.Errorf("loading identity issuer modules: %s", err)
+		}
+		for _, issVal := range val.([]any) {
+			cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer))
+		}
+	}
+
+	// we'll make a new cache when we make the CertMagic config, so stop any previous cache
+	if identityCertCache != nil {
+		identityCertCache.Stop()
+	}
+
+	logger := Log().Named("admin.identity")
+	cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true)
+
+	// issuers have circular dependencies with the configs because,
+	// as explained in the caddytls package, they need access to the
+	// correct storage and cache to solve ACME challenges
+	for _, issuer := range cfg.Admin.Identity.issuers {
+		// avoid import cycle with caddytls package, so manually duplicate the interface here, yuck
+		if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok {
+			annoying.SetConfig(cmCfg)
+		}
+	}
+
+	// obtain and renew server identity certificate(s)
+	return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers)
+}
+
+// replaceRemoteAdminServer replaces the running remote admin server
+// according to the relevant configuration in cfg. It stops any previous
+// remote admin server and only starts a new one if configured. It
+// requires that identity management has already been set up (see
+// manageIdentity), since the TLS endpoint depends on the identity
+// certificate cache.
+func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
+	if cfg == nil {
+		return nil
+	}
+
+	remoteLogger := Log().Named("admin.remote")
+
+	// shut down the old remote server asynchronously on return,
+	// mirroring replaceLocalAdminServer
+	oldAdminServer := remoteAdminServer
+	defer func() {
+		if oldAdminServer != nil {
+			go func(oldAdminServer *http.Server) {
+				err := stopAdminServer(oldAdminServer)
+				if err != nil {
+					Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err))
+				}
+			}(oldAdminServer)
+		}
+	}()
+
+	// remote administration is opt-in; nothing more to do if unset
+	if cfg.Admin == nil || cfg.Admin.Remote == nil {
+		return nil
+	}
+
+	addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen)
+	if err != nil {
+		return err
+	}
+
+	// make the HTTP handler but disable Host/Origin enforcement
+	// because we are using TLS authentication instead
+	handler := cfg.Admin.newAdminHandler(addr, true, ctx)
+
+	// create client certificate pool for TLS mutual auth, and extract public keys
+	// so that we can enforce access controls at the application layer
+	clientCertPool := x509.NewCertPool()
+	for i, accessControl := range cfg.Admin.Remote.AccessControl {
+		for j, certBase64 := range accessControl.PublicKeys {
+			cert, err := decodeBase64DERCert(certBase64)
+			if err != nil {
+				return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err)
+			}
+			accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey)
+			clientCertPool.AddCert(cert)
+		}
+	}
+
+	// create TLS config that will enforce mutual authentication
+	if identityCertCache == nil {
+		return fmt.Errorf("cannot enable remote admin without a certificate cache; configure identity management to initialize a certificate cache")
+	}
+	cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false)
+	tlsConfig := cmCfg.TLSConfig()
+	tlsConfig.NextProtos = nil // this server does not solve ACME challenges
+	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+	tlsConfig.ClientCAs = clientCertPool
+
+	// convert logger to stdlib so it can be used by HTTP server
+	serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel)
+	if err != nil {
+		return err
+	}
+
+	serverMu.Lock()
+	// create secure HTTP server
+	remoteAdminServer = &http.Server{
+		Addr: addr.String(), // for logging purposes only
+		Handler: handler,
+		TLSConfig: tlsConfig,
+		ReadTimeout: 10 * time.Second,
+		ReadHeaderTimeout: 5 * time.Second,
+		IdleTimeout: 60 * time.Second,
+		MaxHeaderBytes: 1024 * 64,
+		ErrorLog: serverLogger,
+	}
+	serverMu.Unlock()
+
+	// start listener
+	lnAny, err := addr.Listen(ctx, 0, net.ListenConfig{})
+	if err != nil {
+		return err
+	}
+	ln := lnAny.(net.Listener)
+	ln = tls.NewListener(ln, tlsConfig)
+
+	go func() {
+		// read the package-level server under the mutex to avoid
+		// racing with a subsequent replacement
+		serverMu.Lock()
+		server := remoteAdminServer
+		serverMu.Unlock()
+		if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
+			remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err))
+		}
+	}()
+
+	remoteLogger.Info("secure admin remote control endpoint started",
+		zap.String("address", addr.String()))
+
+	return nil
+}
+
+// certmagicConfig builds a certmagic.Config for managing this server's
+// identity certificates, using the configured issuers and the default
+// (non-clustered) storage. If makeCache is true, a new certificate
+// cache is created and stored in the package-level identityCertCache,
+// replacing any previous value; otherwise the existing cache is used.
+// A nil receiver is tolerated (an empty identity is substituted).
+func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config {
+	var cmCfg *certmagic.Config
+	if ident == nil {
+		// user might not have configured identity; that's OK, we can still make a
+		// certmagic config, although it'll be mostly useless for remote management
+		ident = new(IdentityConfig)
+	}
+	template := certmagic.Config{
+		Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity)
+		Logger: logger,
+		Issuers: ident.issuers,
+	}
+	if makeCache {
+		identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
+			// the closure captures cmCfg, which is assigned below,
+			// so all cached certs resolve to this config
+			GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
+				return cmCfg, nil
+			},
+			Logger: logger.Named("cache"),
+		})
+	}
+	cmCfg = certmagic.New(identityCertCache, template)
+	return cmCfg
+}
+
+// IdentityCredentials returns this instance's configured, managed identity credentials
+// that can be used in TLS client authentication. If logger is nil, the default
+// logger is used. It returns an error if no identity or identifiers are configured.
+func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) {
+	if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil {
+		return nil, fmt.Errorf("no server identity configured")
+	}
+	ident := ctx.cfg.Admin.Identity
+	if len(ident.Identifiers) == 0 {
+		return nil, fmt.Errorf("no identifiers configured")
+	}
+	if logger == nil {
+		logger = Log()
+	}
+	magic := ident.certmagicConfig(logger, false)
+	return magic.ClientCredentials(ctx, ident.Identifiers)
+}
+
+// enforceAccessControls enforces application-layer access controls for r based on remote.
+// It expects that the TLS server has already established at least one verified chain of
+// trust, and then looks for a matching, authorized public key that is allowed to access
+// the defined path(s) using the defined method(s). A matched key with no Permissions
+// entries is granted access to everything. It returns nil if access is permitted, or
+// an APIError (403/401) otherwise.
+func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error {
+	for _, chain := range r.TLS.VerifiedChains {
+		for _, peerCert := range chain {
+			for _, adminAccess := range remote.AccessControl {
+				for _, allowedKey := range adminAccess.publicKeys {
+					// see if we found a matching public key; the TLS server already verified the chain
+					// so we know the client possesses the associated private key; this handy interface
+					// doesn't appear to be defined anywhere in the std lib, but was implemented here:
+					// https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c
+					comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
+					if !ok || !comparer.Equal(allowedKey) {
+						continue
+					}
+
+					// key recognized; make sure its HTTP request is permitted
+					for _, accessPerm := range adminAccess.Permissions {
+						// verify method (nil Methods list allows all methods)
+						methodFound := accessPerm.Methods == nil || slices.Contains(accessPerm.Methods, r.Method)
+						if !methodFound {
+							return APIError{
+								HTTPStatus: http.StatusForbidden,
+								Message: "not authorized to use this method",
+							}
+						}
+
+						// verify path (nil Paths list allows all paths;
+						// entries are simple prefix matches)
+						pathFound := accessPerm.Paths == nil
+						for _, allowedPath := range accessPerm.Paths {
+							if strings.HasPrefix(r.URL.Path, allowedPath) {
+								pathFound = true
+								break
+							}
+						}
+						if !pathFound {
+							return APIError{
+								HTTPStatus: http.StatusForbidden,
+								Message: "not authorized to access this path",
+							}
+						}
+					}
+
+					// public key authorized, method and path allowed
+					return nil
+				}
+			}
+		}
+	}
+
+	// in theory, this should never happen; with an unverified chain, the TLS server
+	// should not accept the connection in the first place, and the acceptable cert
+	// pool is configured using the same list of public keys we verify against
+	return APIError{
+		HTTPStatus: http.StatusUnauthorized,
+		Message: "client identity not authorized",
+	}
+}
+
+// stopAdminServer gracefully shuts down srv, allowing up to
+// 10 seconds for in-flight requests to complete. It returns
+// an error if srv is nil or if shutdown fails.
+func stopAdminServer(srv *http.Server) error {
+	if srv == nil {
+		return fmt.Errorf("no admin server")
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	err := srv.Shutdown(ctx)
+	if err != nil {
+		return fmt.Errorf("shutting down admin server: %v", err)
+	}
+	Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr))
+	return nil
+}
+
+// AdminRouter is a type which can return routes for the admin API.
+// Modules registered in the admin.api namespace are expected to
+// implement this interface (see newAdminHandler).
+type AdminRouter interface {
+	Routes() []AdminRoute
+}
+
+// AdminRoute represents a route for the admin endpoint.
+type AdminRoute struct {
+	// The request path pattern, in http.ServeMux syntax.
+	Pattern string
+
+	// The handler invoked for requests matching Pattern.
+	Handler AdminHandler
+}
+
+// adminHandler is the http.Handler for an admin endpoint. It wraps
+// a ServeMux with the security checks appropriate for either the
+// local (plaintext) endpoint or the remote (mutual-TLS) endpoint.
+type adminHandler struct {
+	mux *http.ServeMux
+
+	// security for local/plaintext endpoint
+	enforceOrigin bool
+	enforceHost bool
+	allowedOrigins []*url.URL
+
+	// security for remote/encrypted endpoint
+	remoteControl *RemoteAdmin
+}
+
+// ServeHTTP is the external entry point for API requests.
+// It will only be called once per request. It logs the request
+// and delegates to serveHTTP for the actual handling.
+func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ip, port, err := net.SplitHostPort(r.RemoteAddr)
+	if err != nil {
+		// not a host:port form (e.g. unix socket); log as-is
+		ip = r.RemoteAddr
+		port = ""
+	}
+	log := Log().Named("admin.api").With(
+		zap.String("method", r.Method),
+		zap.String("host", r.Host),
+		zap.String("uri", r.RequestURI),
+		zap.String("remote_ip", ip),
+		zap.String("remote_port", port),
+		zap.Reflect("headers", r.Header),
+	)
+	if r.TLS != nil {
+		log = log.With(
+			zap.Bool("secure", true),
+			zap.Int("verified_chains", len(r.TLS.VerifiedChains)),
+		)
+	}
+	// metrics scrapes are logged at Debug level only,
+	// presumably to reduce log noise — all other requests
+	// are logged at Info
+	if r.RequestURI == "/metrics" {
+		log.Debug("received request")
+	} else {
+		log.Info("received request")
+	}
+	h.serveHTTP(w, r)
+}
+
+// serveHTTP is the internal entry point for API requests. It may
+// be called more than once per request, for example if a request
+// is rewritten (i.e. internal redirect). It applies the relevant
+// security checks before dispatching to the mux.
+func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) {
+	if h.remoteControl != nil {
+		// enforce access controls on secure endpoint
+		if err := h.remoteControl.enforceAccessControls(r); err != nil {
+			h.handleError(w, r, err)
+			return
+		}
+	}
+
+	if strings.Contains(r.Header.Get("Upgrade"), "websocket") {
+		// I've never been able to demonstrate a vulnerability myself, but apparently
+		// WebSocket connections originating from browsers aren't subject to CORS
+		// restrictions, so we'll just be on the safe side
+		h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed"))
+		return
+	}
+
+	if h.enforceHost {
+		// DNS rebinding mitigation
+		err := h.checkHost(r)
+		if err != nil {
+			h.handleError(w, r, err)
+			return
+		}
+	}
+
+	if h.enforceOrigin {
+		// cross-site mitigation
+		origin, err := h.checkOrigin(r)
+		if err != nil {
+			h.handleError(w, r, err)
+			return
+		}
+
+		// answer CORS preflight requests with the allowed
+		// methods/headers before echoing the origin
+		if r.Method == http.MethodOptions {
+			w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE")
+			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control")
+			w.Header().Set("Access-Control-Allow-Credentials", "true")
+		}
+		w.Header().Set("Access-Control-Allow-Origin", origin)
+	}
+
+	h.mux.ServeHTTP(w, r)
+}
+
+// handleError writes err to w as a JSON API error response.
+// A nil error is a no-op; errInternalRedir re-enters serveHTTP
+// to process the (rewritten) request again. Non-APIError values
+// are wrapped as a 500 Internal Server Error.
+func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) {
+	if err == nil {
+		return
+	}
+	if err == errInternalRedir {
+		h.serveHTTP(w, r)
+		return
+	}
+
+	apiErr, ok := err.(APIError)
+	if !ok {
+		apiErr = APIError{
+			HTTPStatus: http.StatusInternalServerError,
+			Err: err,
+		}
+	}
+	// fill in defaults so the response is always well-formed
+	if apiErr.HTTPStatus == 0 {
+		apiErr.HTTPStatus = http.StatusInternalServerError
+	}
+	if apiErr.Message == "" && apiErr.Err != nil {
+		apiErr.Message = apiErr.Err.Error()
+	}
+
+	Log().Named("admin.api").Error("request error",
+		zap.Error(err),
+		zap.Int("status_code", apiErr.HTTPStatus),
+	)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(apiErr.HTTPStatus)
+	encErr := json.NewEncoder(w).Encode(apiErr)
+	if encErr != nil {
+		Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr))
+	}
+}
+
+// checkHost returns an error if the request's Host header does
+// not match a trustworthy/expected value. This helps to mitigate
+// DNS rebinding attacks.
+func (h adminHandler) checkHost(r *http.Request) error {
+	allowed := slices.ContainsFunc(h.allowedOrigins, func(u *url.URL) bool {
+		return r.Host == u.Host
+	})
+	if !allowed {
+		return APIError{
+			HTTPStatus: http.StatusForbidden,
+			Err: fmt.Errorf("host not allowed: %s", r.Host),
+		}
+	}
+	return nil
+}
+
+// checkOrigin ensures that the Origin header, if
+// set, matches the intended target; prevents arbitrary
+// sites from issuing requests to our listener. It
+// returns the origin that was obtained from r, or an
+// APIError (403) if the origin is missing, invalid, or
+// not in the allowed list.
+func (h adminHandler) checkOrigin(r *http.Request) (string, error) {
+	originStr, origin := h.getOrigin(r)
+	if origin == nil {
+		return "", APIError{
+			HTTPStatus: http.StatusForbidden,
+			Err: fmt.Errorf("required Origin header is missing or invalid"),
+		}
+	}
+	if !h.originAllowed(origin) {
+		return "", APIError{
+			HTTPStatus: http.StatusForbidden,
+			Err: fmt.Errorf("client is not allowed to access from origin '%s'", originStr),
+		}
+	}
+	return origin.String(), nil
+}
+
+// getOrigin extracts the request's origin from the Origin header,
+// falling back to Referer if Origin is empty. It returns the raw
+// header value along with a parsed URL stripped of its path, query,
+// and fragment (only scheme and host are relevant for comparison).
+// The URL is nil if the value could not be parsed.
+func (h adminHandler) getOrigin(r *http.Request) (string, *url.URL) {
+	origin := r.Header.Get("Origin")
+	if origin == "" {
+		origin = r.Header.Get("Referer")
+	}
+	originURL, err := url.Parse(origin)
+	if err != nil {
+		return origin, nil
+	}
+	originURL.Path = ""
+	originURL.RawPath = ""
+	originURL.Fragment = ""
+	originURL.RawFragment = ""
+	originURL.RawQuery = ""
+	return origin, originURL
+}
+
+// originAllowed reports whether origin matches any of the allowed
+// origins by host; when an allowed origin specifies a scheme, the
+// schemes must match as well.
+func (h adminHandler) originAllowed(origin *url.URL) bool {
+	for _, allowedOrigin := range h.allowedOrigins {
+		if allowedOrigin.Scheme != "" && origin.Scheme != allowedOrigin.Scheme {
+			continue
+		}
+		if origin.Host == allowedOrigin.Host {
+			return true
+		}
+	}
+	return false
+}
+
+// etagHasher returns the hasher we use on the config to both
+// produce and verify ETags.
+func etagHasher() hash.Hash { return xxhash.New() }
+
+// makeEtag returns an Etag header value (including quotes) for
+// the given config path and hash of contents at that path. The
+// path is part of the tag so an ETag computed for one path cannot
+// validate a different path.
+func makeEtag(path string, hash hash.Hash) string {
+	return fmt.Sprintf(`"%s %x"`, path, hash.Sum(nil))
+}
+
+// This buffer pool is used to keep buffers for
+// reading the config file during eTag header generation.
+// NOTE(review): this duplicates bufPool declared later in this
+// file; consider consolidating the two pools.
+var bufferPool = sync.Pool{
+	New: func() any {
+		return new(bytes.Buffer)
+	},
+}
+
+// handleConfig responds to requests on the /config/ endpoint.
+// GET serializes the config at the request path to the response
+// body (with an ETag header); POST, PUT, PATCH, and DELETE mutate
+// the config at that path, honoring the If-Match precondition
+// header. A Cache-Control: must-revalidate header forces a reload
+// even if the resulting config is unchanged.
+func handleConfig(w http.ResponseWriter, r *http.Request) error {
+	switch r.Method {
+	case http.MethodGet:
+		w.Header().Set("Content-Type", "application/json")
+		hash := etagHasher()
+
+		// Read the config into a buffer instead of writing directly to
+		// the response writer, as we want to set the ETag as the header,
+		// not the trailer.
+		buf := bufferPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		defer bufferPool.Put(buf)
+
+		// tee the config into the hash so the ETag reflects exactly
+		// the bytes we will send
+		configWriter := io.MultiWriter(buf, hash)
+		err := readConfig(r.URL.Path, configWriter)
+		if err != nil {
+			return APIError{HTTPStatus: http.StatusBadRequest, Err: err}
+		}
+
+		// we could consider setting up a sync.Pool for the summed
+		// hashes to reduce GC pressure.
+		w.Header().Set("Etag", makeEtag(r.URL.Path, hash))
+		_, err = w.Write(buf.Bytes())
+		if err != nil {
+			return APIError{HTTPStatus: http.StatusInternalServerError, Err: err}
+		}
+
+		return nil
+
+	case http.MethodPost,
+		http.MethodPut,
+		http.MethodPatch,
+		http.MethodDelete:
+
+		// DELETE does not use a body, but the others do
+		var body []byte
+		if r.Method != http.MethodDelete {
+			if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") {
+				return APIError{
+					HTTPStatus: http.StatusBadRequest,
+					Err:        fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct),
+				}
+			}
+
+			buf := bufPool.Get().(*bytes.Buffer)
+			buf.Reset()
+			defer bufPool.Put(buf)
+
+			_, err := io.Copy(buf, r.Body)
+			if err != nil {
+				return APIError{
+					HTTPStatus: http.StatusBadRequest,
+					Err:        fmt.Errorf("reading request body: %v", err),
+				}
+			}
+			body = buf.Bytes()
+		}
+
+		forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
+
+		// an unchanged config is not an error to the client
+		err := changeConfig(r.Method, r.URL.Path, body, r.Header.Get("If-Match"), forceReload)
+		if err != nil && !errors.Is(err, errSameConfig) {
+			return err
+		}
+
+	default:
+		return APIError{
+			HTTPStatus: http.StatusMethodNotAllowed,
+			Err:        fmt.Errorf("method %s not allowed", r.Method),
+		}
+	}
+
+	return nil
+}
+
+// handleConfigID responds to requests on the /id/ endpoint: it maps
+// the object ID in the request path to its expanded /config/... path
+// using the @id index, rewrites the request URL accordingly, and
+// returns errInternalRedir so the rewritten request is re-authorized
+// and dispatched again.
+func handleConfigID(w http.ResponseWriter, r *http.Request) error {
+	idPath := r.URL.Path
+
+	// expected shape: /id/<id>[/...] => parts[1]=="id", parts[2]==<id>
+	parts := strings.Split(idPath, "/")
+	if len(parts) < 3 || parts[2] == "" {
+		return APIError{
+			HTTPStatus: http.StatusBadRequest,
+			Err:        fmt.Errorf("request path is missing object ID"),
+		}
+	}
+	if parts[0] != "" || parts[1] != "id" {
+		return APIError{
+			HTTPStatus: http.StatusBadRequest,
+			Err:        fmt.Errorf("malformed object path"),
+		}
+	}
+	id := parts[2]
+
+	// map the ID to the expanded path
+	rawCfgMu.RLock()
+	expanded, ok := rawCfgIndex[id]
+	rawCfgMu.RUnlock()
+	if !ok {
+		return APIError{
+			HTTPStatus: http.StatusNotFound,
+			Err:        fmt.Errorf("unknown object ID '%s'", id),
+		}
+	}
+
+	// piece the full URL path back together
+	parts = append([]string{expanded}, parts[3:]...)
+	r.URL.Path = path.Join(parts...)
+
+	return errInternalRedir
+}
+
+// handleStop responds to a POST on the /stop endpoint by gracefully
+// shutting down the process; any other method is rejected.
+func handleStop(w http.ResponseWriter, r *http.Request) error {
+	if r.Method != http.MethodPost {
+		return APIError{
+			HTTPStatus: http.StatusMethodNotAllowed,
+			Err:        fmt.Errorf("method not allowed"),
+		}
+	}
+
+	exitProcess(context.Background(), Log().Named("admin.api"))
+	return nil
+}
+
+// unsyncedConfigAccess traverses into the current config and performs
+// the operation at path according to method, using body and out as
+// needed. This is a low-level, unsynchronized function; most callers
+// will want to use changeConfig or readConfig instead. This requires a
+// read or write lock on currentCtxMu, depending on method (GET needs
+// only a read lock; all others need a write lock).
+func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error {
+	var err error
+	var val any
+
+	// if there is a request body, decode it into the
+	// variable that will be set in the config according
+	// to method and path
+	if len(body) > 0 {
+		err = json.Unmarshal(body, &val)
+		if err != nil {
+			return fmt.Errorf("decoding request body: %v", err)
+		}
+	}
+
+	enc := json.NewEncoder(out)
+
+	cleanPath := strings.Trim(path, "/")
+	if cleanPath == "" {
+		return fmt.Errorf("no traversable path")
+	}
+
+	parts := strings.Split(cleanPath, "/")
+	if len(parts) == 0 {
+		return fmt.Errorf("path missing")
+	}
+
+	// A path that ends with "..." implies:
+	// 1) the part before it is an array
+	// 2) the payload is an array
+	// and means that the user wants to expand the elements
+	// in the payload array and append each one into the
+	// destination array, like so:
+	// array = append(array, elems...)
+	// This special case is handled below.
+	ellipses := parts[len(parts)-1] == "..."
+	if ellipses {
+		parts = parts[:len(parts)-1]
+	}
+
+	// begin traversal at the root of the raw config
+	var ptr any = rawCfg
+
+traverseLoop:
+	for i, part := range parts {
+		switch v := ptr.(type) {
+		case map[string]any:
+			// if the next part enters a slice, and the slice is our destination,
+			// handle it specially (because appending to the slice copies the slice
+			// header, which does not replace the original one like we want)
+			if arr, ok := v[part].([]any); ok && i == len(parts)-2 {
+				var idx int
+				if method != http.MethodPost {
+					idxStr := parts[len(parts)-1]
+					idx, err = strconv.Atoi(idxStr)
+					if err != nil {
+						return fmt.Errorf("[%s] invalid array index '%s': %v",
+							path, idxStr, err)
+					}
+					// PUT may insert at the end (idx == len(arr));
+					// all other methods must address an existing element
+					if idx < 0 || (method != http.MethodPut && idx >= len(arr)) || idx > len(arr) {
+						return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr)
+					}
+				}
+
+				switch method {
+				case http.MethodGet:
+					err = enc.Encode(arr[idx])
+					if err != nil {
+						return fmt.Errorf("encoding config: %v", err)
+					}
+				case http.MethodPost:
+					if ellipses {
+						valArray, ok := val.([]any)
+						if !ok {
+							return fmt.Errorf("final element is not an array")
+						}
+						v[part] = append(arr, valArray...)
+					} else {
+						v[part] = append(arr, val)
+					}
+				case http.MethodPut:
+					// avoid creation of new slice and a second copy (see
+					// https://github.com/golang/go/wiki/SliceTricks#insert)
+					arr = append(arr, nil)
+					copy(arr[idx+1:], arr[idx:])
+					arr[idx] = val
+					v[part] = arr
+				case http.MethodPatch:
+					arr[idx] = val
+				case http.MethodDelete:
+					v[part] = append(arr[:idx], arr[idx+1:]...)
+				default:
+					return fmt.Errorf("unrecognized method %s", method)
+				}
+				break traverseLoop
+			}
+
+			// last part of the path: perform the operation here
+			if i == len(parts)-1 {
+				switch method {
+				case http.MethodGet:
+					err = enc.Encode(v[part])
+					if err != nil {
+						return fmt.Errorf("encoding config: %v", err)
+					}
+				case http.MethodPost:
+					// if the part is an existing list, POST appends to
+					// it, otherwise it just sets or creates the value
+					if arr, ok := v[part].([]any); ok {
+						if ellipses {
+							valArray, ok := val.([]any)
+							if !ok {
+								return fmt.Errorf("final element is not an array")
+							}
+							v[part] = append(arr, valArray...)
+						} else {
+							v[part] = append(arr, val)
+						}
+					} else {
+						v[part] = val
+					}
+				case http.MethodPut:
+					if _, ok := v[part]; ok {
+						return APIError{
+							HTTPStatus: http.StatusConflict,
+							Err:        fmt.Errorf("[%s] key already exists: %s", path, part),
+						}
+					}
+					v[part] = val
+				case http.MethodPatch:
+					if _, ok := v[part]; !ok {
+						return APIError{
+							HTTPStatus: http.StatusNotFound,
+							Err:        fmt.Errorf("[%s] key does not exist: %s", path, part),
+						}
+					}
+					v[part] = val
+				case http.MethodDelete:
+					if _, ok := v[part]; !ok {
+						return APIError{
+							HTTPStatus: http.StatusNotFound,
+							Err:        fmt.Errorf("[%s] key does not exist: %s", path, part),
+						}
+					}
+					delete(v, part)
+				default:
+					return fmt.Errorf("unrecognized method %s", method)
+				}
+			} else {
+				// if we are "PUTting" a new resource, the key(s) in its path
+				// might not exist yet; that's OK but we need to make them as
+				// we go, while we still have a pointer from the level above
+				if v[part] == nil && method == http.MethodPut {
+					v[part] = make(map[string]any)
+				}
+				ptr = v[part]
+			}
+
+		case []any:
+			// intermediate array: the part must be a valid index
+			partInt, err := strconv.Atoi(part)
+			if err != nil {
+				return fmt.Errorf("[/%s] invalid array index '%s': %v",
+					strings.Join(parts[:i+1], "/"), part, err)
+			}
+			if partInt < 0 || partInt >= len(v) {
+				return fmt.Errorf("[/%s] array index out of bounds: %s",
+					strings.Join(parts[:i+1], "/"), part)
+			}
+			ptr = v[partInt]
+
+		default:
+			return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/"))
+		}
+	}
+
+	return nil
+}
+
+// RemoveMetaFields removes meta fields like "@id" from a JSON message
+// by using a simple regular expression. (An alternate way to do this
+// would be to delete them from the raw, map[string]any
+// representation as they are indexed, then iterate the index we made
+// and add them back after encoding as JSON, but this is simpler.)
+// See idRegexp for the exact pattern being matched.
+func RemoveMetaFields(rawJSON []byte) []byte {
+	return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte {
+		// matches with a comma on both sides (when "@id" property is
+		// not the first or last in the object) need to keep exactly
+		// one comma for correct JSON syntax
+		comma := []byte{','}
+		if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) {
+			return comma
+		}
+		return []byte{}
+	})
+}
+
+// AdminHandler is like http.Handler except ServeHTTP may return an error.
+//
+// If any handler encounters an error, it should be returned for proper
+// handling; the admin endpoint serializes it to the client as a JSON
+// APIError (see handleError).
+type AdminHandler interface {
+	ServeHTTP(http.ResponseWriter, *http.Request) error
+}
+
+// AdminHandlerFunc is a convenience type like http.HandlerFunc,
+// letting a plain function satisfy AdminHandler.
+type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error
+
+// ServeHTTP implements the Handler interface by calling f itself.
+func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
+	return f(w, r)
+}
+
+// APIError is a structured error that every API
+// handler should return for consistency in logging
+// and client responses. If Message is unset, then
+// Err.Error() will be serialized in its place.
+type APIError struct {
+	HTTPStatus int    `json:"-"`
+	Err        error  `json:"-"`
+	Message    string `json:"error"`
+}
+
+// Error returns the wrapped error's message, falling back
+// to Message when no underlying error is set.
+func (e APIError) Error() string {
+	if e.Err != nil {
+		return e.Err.Error()
+	}
+	return e.Message
+}
+
+// parseAdminListenAddr extracts a singular listen address from either addr
+// or defaultAddr, returning the network and the address of the listener.
+// Placeholders in addr are replaced first; an empty (post-replacement)
+// addr falls back to defaultAddr, and the parsed result must specify
+// exactly one port.
+func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) {
+	input, err := NewReplacer().ReplaceOrErr(addr, true, true)
+	if err != nil {
+		return NetworkAddress{}, fmt.Errorf("replacing listen address: %v", err)
+	}
+	if input == "" {
+		input = defaultAddr
+	}
+	listenAddr, err := ParseNetworkAddress(input)
+	if err != nil {
+		return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err)
+	}
+	if listenAddr.PortRangeSize() != 1 {
+		return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr)
+	}
+	return listenAddr, nil
+}
+
+// decodeBase64DERCert base64-decodes, then DER-decodes, certStr,
+// returning the parsed x509 certificate.
+func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
+	derBytes, err := base64.StdEncoding.DecodeString(certStr)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(derBytes)
+}
+
+// loggableURLArray lets a slice of URLs be logged as a zap
+// array of their string forms.
+type loggableURLArray []*url.URL
+
+// MarshalLogArray implements zapcore.ArrayMarshaler.
+func (ua loggableURLArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+	if ua == nil {
+		return nil
+	}
+	for _, u := range ua {
+		enc.AppendString(u.String())
+	}
+	return nil
+}
+
+// These are declared as variables (not constants) so programs
+// embedding Caddy may override them before use.
+var (
+	// DefaultAdminListen is the address for the local admin
+	// listener, if none is specified at startup.
+	DefaultAdminListen = "localhost:2019"
+
+	// DefaultRemoteAdminListen is the address for the remote
+	// (TLS-authenticated) admin listener, if enabled and not
+	// specified otherwise.
+	DefaultRemoteAdminListen = ":2021"
+)
+
+// PIDFile writes a pidfile to the file at filename. It
+// will get deleted before the process gracefully exits.
+func PIDFile(filename string) error {
+	pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
+	err := os.WriteFile(filename, pid, 0o600)
+	if err != nil {
+		return err
+	}
+	// remember the filename only once the write succeeded
+	pidfile = filename
+	return nil
+}
+
+// idRegexp is used to match ID fields and their associated values
+// in the config. It also matches adjacent commas so that syntax
+// can be preserved no matter where in the object the field appears.
+// It supports string and most numeric values. The (?U) flag makes
+// the quoted-string alternative non-greedy so only one value is
+// consumed per match.
+var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`)
+
+// pidfile is the name of the pidfile, if any.
+var pidfile string
+
+// errInternalRedir indicates an internal redirect
+// and is useful when admin API handlers rewrite
+// the request; in that case, authentication and
+// authorization needs to happen again for the
+// rewritten request.
+var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required")
+
+const (
+	// rawConfigKey is the top-level key in rawCfg under which
+	// the raw config is stored.
+	rawConfigKey = "config"
+
+	// idKey is the name of the meta field used to tag config objects.
+	idKey = "@id"
+)
+
+// bufPool reuses buffers, e.g. for reading request bodies.
+var bufPool = sync.Pool{
+	New: func() any {
+		return new(bytes.Buffer)
+	},
+}
+
+// keep a reference to admin endpoint singletons while they're active;
+// serverMu guards replacement/shutdown of the servers below
+var (
+	serverMu                            sync.Mutex
+	localAdminServer, remoteAdminServer *http.Server
+	identityCertCache                   *certmagic.Cache
+)
diff --git a/admin_test.go b/admin_test.go
new file mode 100644
index 00000000000..b00cfaae251
--- /dev/null
+++ b/admin_test.go
@@ -0,0 +1,939 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddy
+
+import (
+ "context"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "sync"
+ "testing"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// testCfg is a minimal HTTP-app config used by the load and
+// benchmark tests in this file.
+var testCfg = []byte(`{
+		"apps": {
+			"http": {
+				"servers": {
+					"myserver": {
+						"listen": ["tcp/localhost:8080-8084"],
+						"read_timeout": "30s"
+					},
+					"yourserver": {
+						"listen": ["127.0.0.1:5000"],
+						"read_header_timeout": "15s"
+					}
+				}
+			}
+		}
+	}
+	`)
+
+// TestUnsyncedConfigAccess exercises unsyncedConfigAccess with a
+// sequence of mutations; each case builds on the state left by the
+// previous one, so order matters.
+func TestUnsyncedConfigAccess(t *testing.T) {
+	// each test is performed in sequence, so
+	// each change builds on the previous ones;
+	// the config is not reset between tests
+	for i, tc := range []struct {
+		method    string
+		path      string // rawConfigKey will be prepended
+		payload   string
+		expect    string // JSON representation of what the whole config is expected to be after the request
+		shouldErr bool
+	}{
+		{
+			method:  "POST",
+			path:    "",
+			payload: `{"foo": "bar", "list": ["a", "b", "c"]}`, // starting value
+			expect:  `{"foo": "bar", "list": ["a", "b", "c"]}`,
+		},
+		{
+			method:  "POST",
+			path:    "/foo",
+			payload: `"jet"`,
+			expect:  `{"foo": "jet", "list": ["a", "b", "c"]}`,
+		},
+		{
+			method:  "POST",
+			path:    "/bar",
+			payload: `{"aa": "bb", "qq": "zz"}`,
+			expect:  `{"foo": "jet", "bar": {"aa": "bb", "qq": "zz"}, "list": ["a", "b", "c"]}`,
+		},
+		{
+			method: "DELETE",
+			path:   "/bar/qq",
+			expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
+		},
+		{
+			// deleting a key that no longer exists must error
+			method:    "DELETE",
+			path:      "/bar/qq",
+			expect:    `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
+			shouldErr: true,
+		},
+		{
+			method:  "POST",
+			path:    "/list",
+			payload: `"e"`,
+			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "e"]}`,
+		},
+		{
+			method:  "PUT",
+			path:    "/list/3",
+			payload: `"d"`,
+			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d", "e"]}`,
+		},
+		{
+			method: "DELETE",
+			path:   "/list/3",
+			expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "e"]}`,
+		},
+		{
+			method:  "PATCH",
+			path:    "/list/3",
+			payload: `"d"`,
+			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d"]}`,
+		},
+		{
+			// "..." expands the payload array into the destination array
+			method:  "POST",
+			path:    "/list/...",
+			payload: `["e", "f", "g"]`,
+			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d", "e", "f", "g"]}`,
+		},
+	} {
+		err := unsyncedConfigAccess(tc.method, rawConfigKey+tc.path, []byte(tc.payload), nil)
+
+		if tc.shouldErr && err == nil {
+			t.Fatalf("Test %d: Expected error return value, but got: %v", i, err)
+		}
+		if !tc.shouldErr && err != nil {
+			t.Fatalf("Test %d: Should not have had error return value, but got: %v", i, err)
+		}
+
+		// decode the expected config so we can do a convenient DeepEqual
+		var expectedDecoded any
+		err = json.Unmarshal([]byte(tc.expect), &expectedDecoded)
+		if err != nil {
+			t.Fatalf("Test %d: Unmarshaling expected config: %v", i, err)
+		}
+
+		// make sure the resulting config is as we expect it
+		if !reflect.DeepEqual(rawCfg[rawConfigKey], expectedDecoded) {
+			t.Fatalf("Test %d:\nExpected:\n\t%#v\nActual:\n\t%#v",
+				i, expectedDecoded, rawCfg[rawConfigKey])
+		}
+	}
+}
+
+// TestLoadConcurrent exercises Load under concurrent conditions
+// and is most useful under test with `-race` enabled.
+func TestLoadConcurrent(t *testing.T) {
+	var wg sync.WaitGroup
+
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func() {
+			// result discarded; we only care about data races
+			_ = Load(testCfg, true)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
+
+// fooModule is a minimal app module used to register a test app.
+type fooModule struct {
+	IntField int
+	StrField string
+}
+
+func (fooModule) CaddyModule() ModuleInfo {
+	return ModuleInfo{
+		ID:  "foo",
+		New: func() Module { return new(fooModule) },
+	}
+}
+
+// Start and Stop are no-ops; they exist so the module runs as an app.
+func (fooModule) Start() error { return nil }
+func (fooModule) Stop() error { return nil }
+
+// TestETags verifies optimistic concurrency control on config
+// changes: an update with a stale or malformed ETag is rejected
+// with 412 Precondition Failed, and an update carrying the current
+// ETag succeeds (after which the old ETag no longer matches).
+func TestETags(t *testing.T) {
+	RegisterModule(fooModule{})
+
+	if err := Load([]byte(`{"admin": {"listen": "localhost:2999"}, "apps": {"foo": {"strField": "abc", "intField": 0}}}`), true); err != nil {
+		t.Fatalf("loading: %s", err)
+	}
+
+	const key = "/" + rawConfigKey + "/apps/foo"
+
+	// try update the config with the wrong etag
+	err := changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), fmt.Sprintf(`"/%s not_an_etag"`, rawConfigKey), false)
+	if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
+		t.Fatalf("expected precondition failed; got %v", err)
+	}
+
+	// get the etag
+	hash := etagHasher()
+	if err := readConfig(key, hash); err != nil {
+		t.Fatalf("reading: %s", err)
+	}
+
+	// do the same update with the correct key
+	err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), makeEtag(key, hash), false)
+	if err != nil {
+		t.Fatalf("expected update to work; got %v", err)
+	}
+
+	// now try another update. The hash should no longer match and we should get precondition failed
+	err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 2}`), makeEtag(key, hash), false)
+	if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
+		t.Fatalf("expected precondition failed; got %v", err)
+	}
+}
+
+// BenchmarkLoad measures the cost of loading the test config.
+func BenchmarkLoad(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		// error intentionally ignored; only timing matters here
+		Load(testCfg, true)
+	}
+}
+
+// TestAdminHandlerErrorHandling verifies that an error produced by a
+// route is serialized to the client as a JSON APIError with a
+// non-200 status code.
+func TestAdminHandlerErrorHandling(t *testing.T) {
+	initAdminMetrics()
+
+	handler := adminHandler{
+		mux: http.NewServeMux(),
+	}
+
+	handler.mux.Handle("/error", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		err := fmt.Errorf("test error")
+		handler.handleError(w, r, err)
+	}))
+
+	req := httptest.NewRequest(http.MethodGet, "/error", nil)
+	rr := httptest.NewRecorder()
+
+	handler.ServeHTTP(rr, req)
+
+	if rr.Code == http.StatusOK {
+		t.Error("expected error response, got success")
+	}
+
+	// the body should decode as an APIError whose message is the error text
+	var apiErr APIError
+	if err := json.NewDecoder(rr.Body).Decode(&apiErr); err != nil {
+		t.Fatalf("decoding response: %v", err)
+	}
+	if apiErr.Message != "test error" {
+		t.Errorf("expected error message 'test error', got '%s'", apiErr.Message)
+	}
+}
+
+// initAdminMetrics (re-)registers the admin endpoint metrics so each
+// test observes fresh counters, unregistering any existing collectors
+// first to avoid duplicate-registration panics.
+func initAdminMetrics() {
+	if adminMetrics.requestErrors != nil {
+		prometheus.Unregister(adminMetrics.requestErrors)
+	}
+	if adminMetrics.requestCount != nil {
+		prometheus.Unregister(adminMetrics.requestCount)
+	}
+
+	adminMetrics.requestErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "caddy",
+		Subsystem: "admin_http",
+		Name:      "request_errors_total",
+		Help:      "Number of errors that occurred handling admin endpoint requests",
+	}, []string{"handler", "path", "method"})
+
+	adminMetrics.requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "caddy",
+		Subsystem: "admin_http",
+		Name:      "requests_total",
+		Help:      "Count of requests to the admin endpoint",
+	}, []string{"handler", "path", "code", "method"}) // Added code and method labels
+
+	prometheus.MustRegister(adminMetrics.requestErrors)
+	prometheus.MustRegister(adminMetrics.requestCount)
+}
+
+// TestAdminHandlerBuiltinRouteErrors starts the local admin server and
+// verifies that the built-in routes reject bad requests with the
+// expected status codes, incrementing the error metric once each.
+func TestAdminHandlerBuiltinRouteErrors(t *testing.T) {
+	initAdminMetrics()
+
+	cfg := &Config{
+		Admin: &AdminConfig{
+			Listen: "localhost:2019",
+		},
+	}
+
+	err := replaceLocalAdminServer(cfg, Context{})
+	if err != nil {
+		t.Fatalf("setting up admin server: %v", err)
+	}
+	defer func() {
+		stopAdminServer(localAdminServer)
+	}()
+
+	tests := []struct {
+		name           string
+		path           string
+		method         string
+		expectedStatus int
+	}{
+		{
+			name:           "stop endpoint wrong method",
+			path:           "/stop",
+			method:         http.MethodGet,
+			expectedStatus: http.StatusMethodNotAllowed,
+		},
+		{
+			name:           "config endpoint wrong content-type",
+			path:           "/config/",
+			method:         http.MethodPost,
+			expectedStatus: http.StatusBadRequest,
+		},
+		{
+			name:           "config ID missing ID",
+			path:           "/id/",
+			method:         http.MethodGet,
+			expectedStatus: http.StatusBadRequest,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			req := httptest.NewRequest(test.method, fmt.Sprintf("http://localhost:2019%s", test.path), nil)
+			rr := httptest.NewRecorder()
+
+			localAdminServer.Handler.ServeHTTP(rr, req)
+
+			if rr.Code != test.expectedStatus {
+				t.Errorf("expected status %d but got %d", test.expectedStatus, rr.Code)
+			}
+
+			// each failing request should bump the error counter exactly once
+			metricValue := testGetMetricValue(map[string]string{
+				"path":    test.path,
+				"handler": "admin",
+				"method":  test.method,
+			})
+			if metricValue != 1 {
+				t.Errorf("expected error metric to be incremented once, got %v", metricValue)
+			}
+		})
+	}
+}
+
+// testGetMetricValue reads the current value of the requestErrors
+// counter matching the given labels, or 0 if no such metric exists.
+func testGetMetricValue(labels map[string]string) float64 {
+	promLabels := prometheus.Labels{}
+	for k, v := range labels {
+		promLabels[k] = v
+	}
+
+	metric, err := adminMetrics.requestErrors.GetMetricWith(promLabels)
+	if err != nil {
+		return 0
+	}
+
+	// Write error ignored: reading an in-memory counter into pb
+	pb := &dto.Metric{}
+	metric.Write(pb)
+	return pb.GetCounter().GetValue()
+}
+
+// mockRouter supplies a canned set of admin routes for tests.
+type mockRouter struct {
+	routes []AdminRoute
+}
+
+// Routes returns the configured routes.
+func (m mockRouter) Routes() []AdminRoute {
+	return m.routes
+}
+
+// mockModule is a registerable admin.api module that exposes
+// mockRouter's routes.
+type mockModule struct {
+	mockRouter
+}
+
+func (m *mockModule) CaddyModule() ModuleInfo {
+	return ModuleInfo{
+		ID: "admin.api.mock",
+		New: func() Module {
+			// each instance gets a copy of the configured routes
+			mm := &mockModule{
+				mockRouter: mockRouter{
+					routes: m.routes,
+				},
+			}
+			return mm
+		},
+	}
+}
+
+// TestNewAdminHandlerRouterRegistration verifies that routes from a
+// registered admin.api module are wired into a new admin handler, and
+// that the router is retained on the AdminConfig for provisioning.
+func TestNewAdminHandlerRouterRegistration(t *testing.T) {
+	// snapshot and restore the global module registry
+	originalModules := make(map[string]ModuleInfo)
+	for k, v := range modules {
+		originalModules[k] = v
+	}
+	defer func() {
+		modules = originalModules
+	}()
+
+	mockRoute := AdminRoute{
+		Pattern: "/mock",
+		Handler: AdminHandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+			w.WriteHeader(http.StatusOK)
+			return nil
+		}),
+	}
+
+	mock := &mockModule{
+		mockRouter: mockRouter{
+			routes: []AdminRoute{mockRoute},
+		},
+	}
+	RegisterModule(mock)
+
+	addr, err := ParseNetworkAddress("localhost:2019")
+	if err != nil {
+		t.Fatalf("Failed to parse address: %v", err)
+	}
+
+	admin := &AdminConfig{
+		EnforceOrigin: false,
+	}
+	handler := admin.newAdminHandler(addr, false, Context{})
+
+	// the mock route should be reachable through the new handler
+	req := httptest.NewRequest("GET", "/mock", nil)
+	req.Host = "localhost:2019"
+	rr := httptest.NewRecorder()
+
+	handler.ServeHTTP(rr, req)
+
+	if rr.Code != http.StatusOK {
+		t.Errorf("Expected status code %d but got %d", http.StatusOK, rr.Code)
+		t.Logf("Response body: %s", rr.Body.String())
+	}
+
+	if len(admin.routers) != 1 {
+		t.Errorf("Expected 1 router to be stored, got %d", len(admin.routers))
+	}
+}
+
+// mockProvisionableRouter is a mockRouter that also implements
+// Provision, optionally failing with provisionErr.
+type mockProvisionableRouter struct {
+	mockRouter
+	provisionErr error
+	provisioned  bool
+}
+
+// Provision records that provisioning happened and returns the
+// configured error, if any.
+func (m *mockProvisionableRouter) Provision(Context) error {
+	m.provisioned = true
+	return m.provisionErr
+}
+
+// mockProvisionableModule is a registerable admin.api module whose
+// router participates in provisioning (and can simulate failure).
+type mockProvisionableModule struct {
+	*mockProvisionableRouter
+}
+
+func (m *mockProvisionableModule) CaddyModule() ModuleInfo {
+	return ModuleInfo{
+		ID: "admin.api.mock_provision",
+		New: func() Module {
+			// each instance copies the routes and the configured error
+			mm := &mockProvisionableModule{
+				mockProvisionableRouter: &mockProvisionableRouter{
+					mockRouter:   m.mockRouter,
+					provisionErr: m.provisionErr,
+				},
+			}
+			return mm
+		},
+	}
+}
+
+// TestAdminRouterProvisioning checks that provisioning admin routers
+// succeeds (and clears the stored routers) or propagates a router's
+// provisioning error (leaving the router stored).
+func TestAdminRouterProvisioning(t *testing.T) {
+	tests := []struct {
+		name         string
+		provisionErr error
+		wantErr      bool
+		routersAfter int // expected number of routers after provisioning
+	}{
+		{
+			name:         "successful provisioning",
+			provisionErr: nil,
+			wantErr:      false,
+			routersAfter: 0,
+		},
+		{
+			name:         "provisioning error",
+			provisionErr: fmt.Errorf("provision failed"),
+			wantErr:      true,
+			routersAfter: 1,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			// snapshot and restore the global module registry
+			originalModules := make(map[string]ModuleInfo)
+			for k, v := range modules {
+				originalModules[k] = v
+			}
+			defer func() {
+				modules = originalModules
+			}()
+
+			mockRoute := AdminRoute{
+				Pattern: "/mock",
+				Handler: AdminHandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+					return nil
+				}),
+			}
+
+			// Create provisionable module
+			mock := &mockProvisionableModule{
+				mockProvisionableRouter: &mockProvisionableRouter{
+					mockRouter: mockRouter{
+						routes: []AdminRoute{mockRoute},
+					},
+					provisionErr: test.provisionErr,
+				},
+			}
+			RegisterModule(mock)
+
+			admin := &AdminConfig{}
+			addr, err := ParseNetworkAddress("localhost:2019")
+			if err != nil {
+				t.Fatalf("Failed to parse address: %v", err)
+			}
+
+			// building the handler stores the router; provisioning consumes it
+			_ = admin.newAdminHandler(addr, false, Context{})
+			err = admin.provisionAdminRouters(Context{})
+
+			if test.wantErr {
+				if err == nil {
+					t.Error("Expected error but got nil")
+				}
+			} else {
+				if err != nil {
+					t.Errorf("Expected no error but got: %v", err)
+				}
+			}
+
+			if len(admin.routers) != test.routersAfter {
+				t.Errorf("Expected %d routers after provisioning, got %d", test.routersAfter, len(admin.routers))
+			}
+		})
+	}
+}
+
+// TestAllowedOriginsUnixSocket checks the allowed-origin defaults for
+// unix sockets and loopback TCP listeners, and that explicitly
+// configured origins override the defaults.
+func TestAllowedOriginsUnixSocket(t *testing.T) {
+	tests := []struct {
+		name          string
+		addr          NetworkAddress
+		origins       []string
+		expectOrigins []string
+	}{
+		{
+			name: "unix socket with default origins",
+			addr: NetworkAddress{
+				Network: "unix",
+				Host:    "/tmp/caddy.sock",
+			},
+			origins: nil, // default origins
+			expectOrigins: []string{
+				"", // empty host as per RFC 2616
+				"127.0.0.1",
+				"::1",
+			},
+		},
+		{
+			name: "unix socket with custom origins",
+			addr: NetworkAddress{
+				Network: "unix",
+				Host:    "/tmp/caddy.sock",
+			},
+			origins: []string{"example.com"},
+			expectOrigins: []string{
+				"example.com",
+			},
+		},
+		{
+			name: "tcp socket on localhost gets all loopback addresses",
+			addr: NetworkAddress{
+				Network:   "tcp",
+				Host:      "localhost",
+				StartPort: 2019,
+				EndPort:   2019,
+			},
+			origins: nil,
+			expectOrigins: []string{
+				"localhost:2019",
+				"[::1]:2019",
+				"127.0.0.1:2019",
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			admin := AdminConfig{
+				Origins: test.origins,
+			}
+
+			got := admin.allowedOrigins(test.addr)
+
+			var gotOrigins []string
+			for _, u := range got {
+				gotOrigins = append(gotOrigins, u.Host)
+			}
+
+			if len(gotOrigins) != len(test.expectOrigins) {
+				t.Errorf("Expected %d origins but got %d", len(test.expectOrigins), len(gotOrigins))
+				return
+			}
+
+			// compare as sets: origin order is not significant
+			expectMap := make(map[string]struct{})
+			for _, origin := range test.expectOrigins {
+				expectMap[origin] = struct{}{}
+			}
+
+			gotMap := make(map[string]struct{})
+			for _, origin := range gotOrigins {
+				gotMap[origin] = struct{}{}
+			}
+
+			if !reflect.DeepEqual(expectMap, gotMap) {
+				t.Errorf("Origins mismatch.\nExpected: %v\nGot: %v", test.expectOrigins, gotOrigins)
+			}
+		})
+	}
+}
+
+// TestReplaceRemoteAdminServer exercises replaceRemoteAdminServer with
+// nil/partial configs (no-ops), an invalid listen address, a valid
+// remote admin config with an access-control certificate, and an
+// unparsable certificate.
+func TestReplaceRemoteAdminServer(t *testing.T) {
+	// base64-encoded DER certificate used for the access-control entry
+	const testCert = `MIIDCTCCAfGgAwIBAgIUXsqJ1mY8pKlHQtI3HJ23x2eZPqwwDQYJKoZIhvcNAQEL
+BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIzMDEwMTAwMDAwMFoXDTI0MDEw
+MTAwMDAwMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA4O4S6BSoYcoxvRqI+h7yPOjF6KjntjzVVm9M+uHK4lzX
+F1L3pSxJ2nDD4wZEV3FJ5yFOHVFqkG2vXG3BIczOlYG7UeNmKbQnKc5kZj3HGUrS
+VGEktA4OJbeZhhWP15gcXN5eDM2eH3g9BFXVX6AURxLiUXzhNBUEZuj/OEyH9yEF
+/qPCE+EjzVvWxvBXwgz/io4r4yok/Vq/bxJ6FlV6R7DX5oJSXyO0VEHZPi9DIyNU
+kK3F/r4U1sWiJGWOs8i3YQWZ2ejh1C0aLFZpPcCGGgMNpoF31gyYP6ZuPDUyCXsE
+g36UUw1JHNtIXYcLhnXuqj4A8TybTDpgXLqvwA9DBQIDAQABo1MwUTAdBgNVHQ4E
+FgQUc13z30pFC63rr/HGKOE7E82vjXwwHwYDVR0jBBgwFoAUc13z30pFC63rr/HG
+KOE7E82vjXwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAHO3j
+oeiUXXJ7xD4P8Wj5t9d+E8lE1Xv1Dk3Z+EdG5+dan+RcToE42JJp9zB7FIh5Qz8g
+W77LAjqh5oyqz3A2VJcyVgfE3uJP1R1mJM7JfGHf84QH4TZF2Q1RZY4SZs0VQ6+q
+5wSlIZ4NXDy4Q4XkIJBGS61wT8IzYFXYBpx4PCP1Qj0PIE4sevEGwjsBIgxK307o
+BxF8AWe6N6e4YZmQLGjQ+SeH0iwZb6vpkHyAY8Kj2hvK+cq2P7vU3VGi0t3r1F8L
+IvrXHCvO2BMNJ/1UK1M4YNX8LYJqQhg9hEsIROe1OE/m3VhxIYMJI+qZXk9yHfgJ
+vq+SH04xKhtFudVBAQ==`
+
+	tests := []struct {
+		name    string
+		cfg     *Config
+		wantErr bool
+	}{
+		{
+			name:    "nil config",
+			cfg:     nil,
+			wantErr: false,
+		},
+		{
+			name: "nil admin config",
+			cfg: &Config{
+				Admin: nil,
+			},
+			wantErr: false,
+		},
+		{
+			name: "nil remote config",
+			cfg: &Config{
+				Admin: &AdminConfig{},
+			},
+			wantErr: false,
+		},
+		{
+			name: "invalid listen address",
+			cfg: &Config{
+				Admin: &AdminConfig{
+					Remote: &RemoteAdmin{
+						Listen: "invalid:address",
+					},
+				},
+			},
+			wantErr: true,
+		},
+		{
+			name: "valid config",
+			cfg: &Config{
+				Admin: &AdminConfig{
+					Identity: &IdentityConfig{},
+					Remote: &RemoteAdmin{
+						Listen: "localhost:2021",
+						AccessControl: []*AdminAccess{
+							{
+								PublicKeys:  []string{testCert},
+								Permissions: []AdminPermissions{{Methods: []string{"GET"}, Paths: []string{"/test"}}},
+							},
+						},
+					},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "invalid certificate",
+			cfg: &Config{
+				Admin: &AdminConfig{
+					Identity: &IdentityConfig{},
+					Remote: &RemoteAdmin{
+						Listen: "localhost:2021",
+						AccessControl: []*AdminAccess{
+							{
+								PublicKeys:  []string{"invalid-cert-data"},
+								Permissions: []AdminPermissions{{Methods: []string{"GET"}, Paths: []string{"/test"}}},
+							},
+						},
+					},
+				},
+			},
+			wantErr: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			ctx := Context{
+				Context: context.Background(),
+				cfg:     test.cfg,
+			}
+
+			if test.cfg != nil {
+				test.cfg.storage = &certmagic.FileStorage{Path: t.TempDir()}
+			}
+
+			// identity management needs a certificate cache to draw from
+			if test.cfg != nil && test.cfg.Admin != nil && test.cfg.Admin.Identity != nil {
+				identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
+					GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
+						return &certmagic.Config{}, nil
+					},
+				})
+			}
+
+			err := replaceRemoteAdminServer(ctx, test.cfg)
+
+			if test.wantErr {
+				if err == nil {
+					t.Error("Expected error but got nil")
+				}
+			} else {
+				if err != nil {
+					t.Errorf("Expected no error but got: %v", err)
+				}
+			}
+
+			// Clean up
+			if remoteAdminServer != nil {
+				_ = stopAdminServer(remoteAdminServer)
+			}
+		})
+	}
+}
+
+// mockIssuer is a stub certmagic issuer that records the config it
+// was given and returns the raw CSR bytes as the "certificate".
+type mockIssuer struct {
+	configSet *certmagic.Config
+}
+
+// Issue echoes the CSR's raw bytes back as the issued certificate.
+func (m *mockIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
+	return &certmagic.IssuedCertificate{
+		Certificate: []byte(csr.Raw),
+	}, nil
+}
+
+// SetConfig remembers the config for later inspection.
+func (m *mockIssuer) SetConfig(cfg *certmagic.Config) {
+	m.configSet = cfg
+}
+
+func (m *mockIssuer) IssuerKey() string {
+	return "mock"
+}
+
+type mockIssuerModule struct {
+ *mockIssuer
+}
+
+func (m *mockIssuerModule) CaddyModule() ModuleInfo {
+ return ModuleInfo{
+ ID: "tls.issuance.acme",
+ New: func() Module {
+ return &mockIssuerModule{mockIssuer: new(mockIssuer)}
+ },
+ }
+}
+
+func TestManageIdentity(t *testing.T) {
+ originalModules := make(map[string]ModuleInfo)
+ for k, v := range modules {
+ originalModules[k] = v
+ }
+ defer func() {
+ modules = originalModules
+ }()
+
+ RegisterModule(&mockIssuerModule{})
+
+ certPEM := []byte(`-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgIIE31FZVaPXTUwDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE
+BhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl
+cm5ldCBBdXRob3JpdHkgRzIwHhcNMTQwMTI5MTMyNzQzWhcNMTQwNTI5MDAwMDAw
+WjBpMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN
+TW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEYMBYGA1UEAwwPbWFp
+bC5nb29nbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3lcub2pUwkjC
+5GJQA2ZZfJJi6d1QHhEmkX9VxKYGp6gagZuRqJWy9TXP6++1ZzQQxqZLD0TkuxZ9
+8i9Nz00000CCBjCCAQQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGgG
+CCsGAQUFBwEBBFwwWjArBggrBgEFBQcwAoYfaHR0cDovL3BraS5nb29nbGUuY29t
+L0dJQUcyLmNydDArBggrBgEFBQcwAYYfaHR0cDovL2NsaWVudHMxLmdvb2dsZS5j
+b20vb2NzcDAdBgNVHQ4EFgQUiJxtimAuTfwb+aUtBn5UYKreKvMwDAYDVR0TAQH/
+BAIwADAfBgNVHSMEGDAWgBRK3QYWG7z2aLV29YG2u2IaulqBLzAXBgNVHREEEDAO
+ggxtYWlsLmdvb2dsZTANBgkqhkiG9w0BAQUFAAOCAQEAMP6IWgNGZE8wP9TjFjSZ
+3mmW3A1eIr0CuPwNZ2LJ5ZD1i70ojzcj4I9IdP5yPg9CAEV4hNASbM1LzfC7GmJE
+tPzW5tRmpKVWZGRgTgZI8Hp/xZXMwLh9ZmXV4kESFAGj5G5FNvJyUV7R5Eh+7OZX
+7G4jJ4ZGJh+5jzN9HdJJHQHGYNIYOzC7+HH9UMwCjX9vhQ4RjwFZJThS2Yb+y7pb
+9yxTJZoXC6J0H5JpnZb7kZEJ+Xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+-----END CERTIFICATE-----`)
+
+ keyPEM := []byte(`-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDRS0LmTwUT0iwP
+...
+-----END PRIVATE KEY-----`)
+
+ testStorage := certmagic.FileStorage{Path: t.TempDir()}
+ err := testStorage.Store(context.Background(), "localhost/localhost.crt", certPEM)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = testStorage.Store(context.Background(), "localhost/localhost.key", keyPEM)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tests := []struct {
+ name string
+ cfg *Config
+ wantErr bool
+ checkState func(*testing.T, *Config)
+ }{
+ {
+ name: "nil config",
+ cfg: nil,
+ },
+ {
+ name: "nil admin config",
+ cfg: &Config{
+ Admin: nil,
+ },
+ },
+ {
+ name: "nil identity config",
+ cfg: &Config{
+ Admin: &AdminConfig{},
+ },
+ },
+ {
+ name: "default issuer when none specified",
+ cfg: &Config{
+ Admin: &AdminConfig{
+ Identity: &IdentityConfig{
+ Identifiers: []string{"localhost"},
+ },
+ },
+ storage: &testStorage,
+ },
+ checkState: func(t *testing.T, cfg *Config) {
+ if len(cfg.Admin.Identity.issuers) == 0 {
+ t.Error("Expected at least 1 issuer to be configured")
+ return
+ }
+ if _, ok := cfg.Admin.Identity.issuers[0].(*mockIssuerModule); !ok {
+ t.Error("Expected mock issuer to be configured")
+ }
+ },
+ },
+ {
+ name: "custom issuer",
+ cfg: &Config{
+ Admin: &AdminConfig{
+ Identity: &IdentityConfig{
+ Identifiers: []string{"localhost"},
+ IssuersRaw: []json.RawMessage{
+ json.RawMessage(`{"module": "acme"}`),
+ },
+ },
+ },
+ storage: &certmagic.FileStorage{Path: "testdata"},
+ },
+ checkState: func(t *testing.T, cfg *Config) {
+ if len(cfg.Admin.Identity.issuers) != 1 {
+ t.Fatalf("Expected 1 issuer, got %d", len(cfg.Admin.Identity.issuers))
+ }
+ mockIss, ok := cfg.Admin.Identity.issuers[0].(*mockIssuerModule)
+ if !ok {
+ t.Fatal("Expected mock issuer")
+ }
+ if mockIss.configSet == nil {
+ t.Error("Issuer config was not set")
+ }
+ },
+ },
+ {
+ name: "invalid issuer module",
+ cfg: &Config{
+ Admin: &AdminConfig{
+ Identity: &IdentityConfig{
+ Identifiers: []string{"localhost"},
+ IssuersRaw: []json.RawMessage{
+ json.RawMessage(`{"module": "doesnt_exist"}`),
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if identityCertCache != nil {
+ // Reset the cert cache before each test
+ identityCertCache.Stop()
+ identityCertCache = nil
+ }
+
+ ctx := Context{
+ Context: context.Background(),
+ cfg: test.cfg,
+ moduleInstances: make(map[string][]Module),
+ }
+
+ err := manageIdentity(ctx, test.cfg)
+
+ if test.wantErr {
+ if err == nil {
+ t.Error("Expected error but got nil")
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("Expected no error but got: %v", err)
+ }
+
+ if test.checkState != nil {
+ test.checkState(t, test.cfg)
+ }
+ })
+ }
+}
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 36f1ff06338..00000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\mholt\caddy
-
-environment:
- GOPATH: c:\gopath
-
-install:
- - rmdir c:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-amd64.zip
- - 7z x go1.8.3.windows-amd64.zip -y -oC:\ > NUL
- - set PATH=%GOPATH%\bin;%PATH%
- - go version
- - go env
- - go get -t ./...
- - go get github.com/golang/lint/golint
- - go get github.com/FiloSottile/vendorcheck
- # Install gometalinter and certain linters
- - go get github.com/alecthomas/gometalinter
- - go get github.com/client9/misspell/cmd/misspell
- - go get github.com/gordonklaus/ineffassign
- - go get golang.org/x/tools/cmd/goimports
- - go get github.com/tsenart/deadcode
-
-build: off
-
-test_script:
- - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests --vendor ./...
- - vendorcheck ./...
- # TODO: When Go 1.9 comes out, replace this whole line with `go test -race ./...` b/c vendor folder should be ignored
- - for /f "" %%G in ('go list ./... ^| find /i /v "/vendor/"') do (go test -race %%G & IF ERRORLEVEL == 1 EXIT 1)
-
-after_test:
- # TODO: When Go 1.9 comes out, replace this whole line with `golint ./...` b/c vendor folder should be ignored
- - for /f "" %%G in ('go list ./... ^| find /i /v "/vendor/"') do (golint %%G & IF ERRORLEVEL == 1 EXIT 1)
-
-deploy: off
diff --git a/assets.go b/assets.go
deleted file mode 100644
index e353af8d355..00000000000
--- a/assets.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package caddy
-
-import (
- "os"
- "path/filepath"
- "runtime"
-)
-
-// AssetsPath returns the path to the folder
-// where the application may store data. If
-// CADDYPATH env variable is set, that value
-// is used. Otherwise, the path is the result
-// of evaluating "$HOME/.caddy".
-func AssetsPath() string {
- if caddyPath := os.Getenv("CADDYPATH"); caddyPath != "" {
- return caddyPath
- }
- return filepath.Join(userHomeDir(), ".caddy")
-}
-
-// userHomeDir returns the user's home directory according to
-// environment variables.
-//
-// Credit: http://stackoverflow.com/a/7922977/1048862
-func userHomeDir() string {
- if runtime.GOOS == "windows" {
- home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
- if home == "" {
- home = os.Getenv("USERPROFILE")
- }
- return home
- }
- return os.Getenv("HOME")
-}
diff --git a/assets_test.go b/assets_test.go
deleted file mode 100644
index 19336104827..00000000000
--- a/assets_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package caddy
-
-import (
- "os"
- "strings"
- "testing"
-)
-
-func TestAssetsPath(t *testing.T) {
- if actual := AssetsPath(); !strings.HasSuffix(actual, ".caddy") {
- t.Errorf("Expected path to be a .caddy folder, got: %v", actual)
- }
-
- os.Setenv("CADDYPATH", "testpath")
- if actual, expected := AssetsPath(), "testpath"; actual != expected {
- t.Errorf("Expected path to be %v, got: %v", expected, actual)
- }
- os.Setenv("CADDYPATH", "")
-}
diff --git a/caddy.go b/caddy.go
index ffab1454b83..758b0b2f6ff 100644
--- a/caddy.go
+++ b/caddy.go
@@ -1,928 +1,1091 @@
-// Package caddy implements the Caddy server manager.
+// Copyright 2015 Matthew Holt and The Caddy Authors
//
-// To use this package:
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
//
-// 1. Set the AppName and AppVersion variables.
-// 2. Call LoadCaddyfile() to get the Caddyfile.
-// Pass in the name of the server type (like "http").
-// Make sure the server type's package is imported
-// (import _ "github.com/mholt/caddy/caddyhttp").
-// 3. Call caddy.Start() to start Caddy. You get back
-// an Instance, on which you can call Restart() to
-// restart it or Stop() to stop it.
+// http://www.apache.org/licenses/LICENSE-2.0
//
-// You should call Wait() on your instance to wait for
-// all servers to quit before your process exits.
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package caddy
import (
"bytes"
- "encoding/gob"
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"log"
- "net"
+ "net/http"
"os"
+ "path"
+ "path/filepath"
+ "runtime/debug"
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
- "github.com/mholt/caddy/caddyfile"
-)
-
-// Configurable application parameters
-var (
- // AppName is the name of the application.
- AppName string
-
- // AppVersion is the version of the application.
- AppVersion string
-
- // Quiet mode will not show any informative output on initialization.
- Quiet bool
-
- // PidFile is the path to the pidfile to create.
- PidFile string
-
- // GracefulTimeout is the maximum duration of a graceful shutdown.
- GracefulTimeout time.Duration
-
- // isUpgrade will be set to true if this process
- // was started as part of an upgrade, where a parent
- // Caddy process started this one.
- isUpgrade = os.Getenv("CADDY__UPGRADE") == "1"
-
- // started will be set to true when the first
- // instance is started; it never gets set to
- // false after that.
- started bool
+ "github.com/caddyserver/certmagic"
+ "github.com/google/uuid"
+ "go.uber.org/zap"
- // mu protects the variables 'isUpgrade' and 'started'.
- mu sync.Mutex
+ "github.com/caddyserver/caddy/v2/internal/filesystems"
+ "github.com/caddyserver/caddy/v2/notify"
)
-// Instance contains the state of servers created as a result of
-// calling Start and can be used to access or control those servers.
-type Instance struct {
- // serverType is the name of the instance's server type
- serverType string
-
- // caddyfileInput is the input configuration text used for this process
- caddyfileInput Input
-
- // wg is used to wait for all servers to shut down
- wg *sync.WaitGroup
-
- // context is the context created for this instance.
- context Context
-
- // servers is the list of servers with their listeners.
- servers []ServerListener
+// Config is the top (or beginning) of the Caddy configuration structure.
+// Caddy config is expressed natively as a JSON document. If you prefer
+// not to work with JSON directly, there are [many config adapters](/docs/config-adapters)
+// available that can convert various inputs into Caddy JSON.
+//
+// Many parts of this config are extensible through the use of Caddy modules.
+// Fields which have a json.RawMessage type and which appear as dots (•••) in
+// the online docs can be fulfilled by modules in a certain module
+// namespace. The docs show which modules can be used in a given place.
+//
+// Whenever a module is used, its name must be given either inline as part of
+// the module, or as the key to the module's value. The docs will make it clear
+// which to use.
+//
+// Generally, all config settings are optional, as it is Caddy convention to
+// have good, documented default values. If a parameter is required, the docs
+// should say so.
+//
+// Go programs which are directly building a Config struct value should take
+// care to populate the JSON-encodable fields of the struct (i.e. the fields
+// with `json` struct tags) if employing the module lifecycle (e.g. Provision
+// method calls).
+type Config struct {
+ Admin *AdminConfig `json:"admin,omitempty"`
+ Logging *Logging `json:"logging,omitempty"`
+
+ // StorageRaw is a storage module that defines how/where Caddy
+ // stores assets (such as TLS certificates). The default storage
+ // module is `caddy.storage.file_system` (the local file system),
+ // and the default path
+ // [depends on the OS and environment](/docs/conventions#data-directory).
+ StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
+
+ // AppsRaw are the apps that Caddy will load and run. The
+ // app module name is the key, and the app's config is the
+ // associated value.
+ AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="`
+
+ apps map[string]App
+ storage certmagic.Storage
+
+ cancelFunc context.CancelFunc
+
+ // filesystems is a dict of filesystems that will later be loaded from and added to.
+ filesystems FileSystems
+}
- // these callbacks execute when certain events occur
- onFirstStartup []func() error // starting, not as part of a restart
- onStartup []func() error // starting, even as part of a restart
- onRestart []func() error // before restart commences
- onShutdown []func() error // stopping, even as part of a restart
- onFinalShutdown []func() error // stopping, not as part of a restart
+// App is a thing that Caddy runs.
+type App interface {
+ Start() error
+ Stop() error
}
-// Servers returns the ServerListeners in i.
-func (i *Instance) Servers() []ServerListener { return i.servers }
-
-// Stop stops all servers contained in i. It does NOT
-// execute shutdown callbacks.
-func (i *Instance) Stop() error {
- // stop the servers
- for _, s := range i.servers {
- if gs, ok := s.server.(GracefulServer); ok {
- if err := gs.Stop(); err != nil {
- log.Printf("[ERROR] Stopping %s: %v", gs.Address(), err)
- }
- }
+// Run runs the given config, replacing any existing config.
+func Run(cfg *Config) error {
+ cfgJSON, err := json.Marshal(cfg)
+ if err != nil {
+ return err
}
+ return Load(cfgJSON, true)
+}
- // splice i out of instance list, causing it to be garbage-collected
- instancesMu.Lock()
- for j, other := range instances {
- if other == i {
- instances = append(instances[:j], instances[j+1:]...)
- break
- }
+// Load loads the given config JSON and runs it only
+// if it is different from the current config or
+// forceReload is true.
+func Load(cfgJSON []byte, forceReload bool) error {
+ if err := notify.Reloading(); err != nil {
+ Log().Error("unable to notify service manager of reloading state", zap.Error(err))
}
- instancesMu.Unlock()
-
- return nil
-}
-// ShutdownCallbacks executes all the shutdown callbacks of i,
-// including ones that are scheduled only for the final shutdown
-// of i. An error returned from one does not stop execution of
-// the rest. All the non-nil errors will be returned.
-func (i *Instance) ShutdownCallbacks() []error {
- var errs []error
- for _, shutdownFunc := range i.onShutdown {
- err := shutdownFunc()
+ // after reload, notify system of success or, if
+ // failure, update with status (error message)
+ var err error
+ defer func() {
if err != nil {
- errs = append(errs, err)
+ if notifyErr := notify.Error(err, 0); notifyErr != nil {
+ Log().Error("unable to notify to service manager of reload error",
+ zap.Error(notifyErr),
+ zap.String("reload_err", err.Error()))
+ }
+ return
}
- }
- for _, finalShutdownFunc := range i.onFinalShutdown {
- err := finalShutdownFunc()
- if err != nil {
- errs = append(errs, err)
+ if err := notify.Ready(); err != nil {
+ Log().Error("unable to notify to service manager of ready state", zap.Error(err))
}
+ }()
+
+ err = changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
+ if errors.Is(err, errSameConfig) {
+ err = nil // not really an error
}
- return errs
+
+ return err
}
-// Restart replaces the servers in i with new servers created from
-// executing the newCaddyfile. Upon success, it returns the new
-// instance to replace i. Upon failure, i will not be replaced.
-func (i *Instance) Restart(newCaddyfile Input) (*Instance, error) {
- log.Println("[INFO] Reloading")
+// changeConfig changes the current config (rawCfg) according to the
+// method, traversed via the given path, and uses the given input as
+// the new value (if applicable; i.e. "DELETE" doesn't have an input).
+// If the resulting config is the same as the previous, no reload will
+// occur unless forceReload is true. If the config is unchanged and not
+// forcefully reloaded, then errSameConfig is returned. This function is
+// safe for concurrent use.
+// The ifMatchHeader can optionally be given a string of the format:
+//
+// "<path> <hash>"
+//
+// where <path> is the absolute path in the config and <hash> is the expected hash of
+// the config at that path. If the hash in the ifMatchHeader doesn't match
+// the hash of the config, then an APIError with status 412 will be returned.
+func changeConfig(method, path string, input []byte, ifMatchHeader string, forceReload bool) error {
+ switch method {
+ case http.MethodGet,
+ http.MethodHead,
+ http.MethodOptions,
+ http.MethodConnect,
+ http.MethodTrace:
+ return fmt.Errorf("method not allowed")
+ }
- i.wg.Add(1)
- defer i.wg.Done()
+ rawCfgMu.Lock()
+ defer rawCfgMu.Unlock()
- // run restart callbacks
- for _, fn := range i.onRestart {
- err := fn()
+ if ifMatchHeader != "" {
+ // expect the first and last character to be quotes
+ if len(ifMatchHeader) < 2 || ifMatchHeader[0] != '"' || ifMatchHeader[len(ifMatchHeader)-1] != '"' {
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("malformed If-Match header; expect quoted string"),
+ }
+ }
+
+ // read out the parts
+ parts := strings.Fields(ifMatchHeader[1 : len(ifMatchHeader)-1])
+ if len(parts) != 2 {
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("malformed If-Match header; expect format \"<path> <hash>\""),
+ }
+ }
+
+ // get the current hash of the config
+ // at the given path
+ hash := etagHasher()
+ err := unsyncedConfigAccess(http.MethodGet, parts[0], nil, hash)
if err != nil {
- return i, err
+ return err
+ }
+
+ if hex.EncodeToString(hash.Sum(nil)) != parts[1] {
+ return APIError{
+ HTTPStatus: http.StatusPreconditionFailed,
+ Err: fmt.Errorf("If-Match header did not match current config hash"),
+ }
}
}
- if newCaddyfile == nil {
- newCaddyfile = i.caddyfileInput
+ err := unsyncedConfigAccess(method, path, input, nil)
+ if err != nil {
+ return err
}
- // Add file descriptors of all the sockets that are capable of it
- restartFds := make(map[string]restartTriple)
- for _, s := range i.servers {
- gs, srvOk := s.server.(GracefulServer)
- ln, lnOk := s.listener.(Listener)
- pc, pcOk := s.packet.(PacketConn)
- if srvOk {
- if lnOk && pcOk {
- restartFds[gs.Address()] = restartTriple{server: gs, listener: ln, packet: pc}
- continue
- }
- if lnOk {
- restartFds[gs.Address()] = restartTriple{server: gs, listener: ln}
- continue
- }
- if pcOk {
- restartFds[gs.Address()] = restartTriple{server: gs, packet: pc}
- continue
- }
+ // the mutation is complete, so encode the entire config as JSON
+ newCfg, err := json.Marshal(rawCfg[rawConfigKey])
+ if err != nil {
+ return APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("encoding new config: %v", err),
}
}
- // create new instance; if the restart fails, it is simply discarded
- newInst := &Instance{serverType: newCaddyfile.ServerType(), wg: i.wg}
+ // if nothing changed, no need to do a whole reload unless the client forces it
+ if !forceReload && bytes.Equal(rawCfgJSON, newCfg) {
+ Log().Info("config is unchanged")
+ return errSameConfig
+ }
- // attempt to start new instance
- err := startWithListenerFds(newCaddyfile, newInst, restartFds)
+ // find any IDs in this config and index them
+ idx := make(map[string]string)
+ err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx)
if err != nil {
- return i, err
+ return APIError{
+ HTTPStatus: http.StatusInternalServerError,
+ Err: fmt.Errorf("indexing config: %v", err),
+ }
}
- // success! stop the old instance
- for _, shutdownFunc := range i.onShutdown {
- err := shutdownFunc()
- if err != nil {
- return i, err
+ // load this new config; if it fails, we need to revert to
+ // our old representation of caddy's actual config
+ err = unsyncedDecodeAndRun(newCfg, true)
+ if err != nil {
+ if len(rawCfgJSON) > 0 {
+ // restore old config state to keep it consistent
+ // with what caddy is still running; we need to
+ // unmarshal it again because it's likely that
+ // pointers deep in our rawCfg map were modified
+ var oldCfg any
+ err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
+ if err2 != nil {
+ err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)
+ }
+ rawCfg[rawConfigKey] = oldCfg
}
+
+ return fmt.Errorf("loading new config: %v", err)
}
- i.Stop()
- log.Println("[INFO] Reloading complete")
+ // success, so update our stored copy of the encoded
+ // config to keep it consistent with what caddy is now
+ // running (storing an encoded copy is not strictly
+ // necessary, but avoids an extra json.Marshal for
+ // each config change)
+ rawCfgJSON = newCfg
+ rawCfgIndex = idx
- return newInst, nil
+ return nil
}
-// SaveServer adds s and its associated listener ln to the
-// internally-kept list of servers that is running. For
-// saved servers, graceful restarts will be provided.
-func (i *Instance) SaveServer(s Server, ln net.Listener) {
- i.servers = append(i.servers, ServerListener{server: s, listener: ln})
+// readConfig traverses the current config to path
+// and writes its JSON encoding to out.
+func readConfig(path string, out io.Writer) error {
+ rawCfgMu.RLock()
+ defer rawCfgMu.RUnlock()
+ return unsyncedConfigAccess(http.MethodGet, path, nil, out)
}
-// HasListenerWithAddress returns whether this package is
-// tracking a server using a listener with the address
-// addr.
-func HasListenerWithAddress(addr string) bool {
- instancesMu.Lock()
- defer instancesMu.Unlock()
- for _, inst := range instances {
- for _, sln := range inst.servers {
- if listenerAddrEqual(sln.listener, addr) {
- return true
+// indexConfigObjects recursively searches ptr for object fields named
+// "@id" and maps that ID value to the full configPath in the index.
+// This function is NOT safe for concurrent access; obtain a write lock
+// on currentCtxMu.
+func indexConfigObjects(ptr any, configPath string, index map[string]string) error {
+ switch val := ptr.(type) {
+ case map[string]any:
+ for k, v := range val {
+ if k == idKey {
+ switch idVal := v.(type) {
+ case string:
+ index[idVal] = configPath
+ case float64: // all JSON numbers decode as float64
+ index[fmt.Sprintf("%v", idVal)] = configPath
+ default:
+ return fmt.Errorf("%s: %s field must be a string or number", configPath, idKey)
+ }
+ continue
+ }
+ // traverse this object property recursively
+ err := indexConfigObjects(val[k], path.Join(configPath, k), index)
+ if err != nil {
+ return err
+ }
+ }
+ case []any:
+ // traverse each element of the array recursively
+ for i := range val {
+ err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index)
+ if err != nil {
+ return err
}
}
}
- return false
+
+ return nil
}
-// listenerAddrEqual compares a listener's address with
-// addr. Extra care is taken to match addresses with an
-// empty hostname portion, as listeners tend to report
-// [::]:80, for example, when the matching address that
-// created the listener might be simply :80.
-func listenerAddrEqual(ln net.Listener, addr string) bool {
- lnAddr := ln.Addr().String()
- hostname, port, err := net.SplitHostPort(addr)
+// unsyncedDecodeAndRun removes any meta fields (like @id tags)
+// from cfgJSON, decodes the result into a *Config, and runs
+// it as the new config, replacing any other current config.
+// It does NOT update the raw config state, as this is a
+// lower-level function; most callers will want to use Load
+// instead. A write lock on rawCfgMu is required! If
+// allowPersist is false, it will not be persisted to disk,
+// even if it is configured to.
+func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
+ // remove any @id fields from the JSON, which would cause
+ // loading to break since the field wouldn't be recognized
+ strippedCfgJSON := RemoveMetaFields(cfgJSON)
+
+ var newCfg *Config
+ err := StrictUnmarshalJSON(strippedCfgJSON, &newCfg)
if err != nil {
- return lnAddr == addr
+ return err
}
- if lnAddr == net.JoinHostPort("::", port) {
- return true
+
+ // prevent recursive config loads; that is a user error, and
+ // although frequent config loads should be safe, we cannot
+ // guarantee that in the presence of third party plugins, nor
+ // do we want this error to go unnoticed (we assume it was a
+ // pulled config if we're not allowed to persist it)
+ if !allowPersist &&
+ newCfg != nil &&
+ newCfg.Admin != nil &&
+ newCfg.Admin.Config != nil &&
+ newCfg.Admin.Config.LoadRaw != nil &&
+ newCfg.Admin.Config.LoadDelay <= 0 {
+ return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_delay")
}
- if lnAddr == net.JoinHostPort("0.0.0.0", port) {
- return true
+
+ // run the new config and start all its apps
+ ctx, err := run(newCfg, true)
+ if err != nil {
+ return err
}
- return hostname != "" && lnAddr == addr
-}
-// TCPServer is a type that can listen and serve connections.
-// A TCPServer must associate with exactly zero or one net.Listeners.
-type TCPServer interface {
- // Listen starts listening by creating a new listener
- // and returning it. It does not start accepting
- // connections. For UDP-only servers, this method
- // can be a no-op that returns (nil, nil).
- Listen() (net.Listener, error)
-
- // Serve starts serving using the provided listener.
- // Serve must start the server loop nearly immediately,
- // or at least not return any errors before the server
- // loop begins. Serve blocks indefinitely, or in other
- // words, until the server is stopped. For UDP-only
- // servers, this method can be a no-op that returns nil.
- Serve(net.Listener) error
-}
+ // swap old context (including its config) with the new one
+ currentCtxMu.Lock()
+ oldCtx := currentCtx
+ currentCtx = ctx
+ currentCtxMu.Unlock()
+
+ // Stop, Cleanup each old app
+ unsyncedStop(oldCtx)
+
+ // autosave a non-nil config, if not disabled
+ if allowPersist &&
+ newCfg != nil &&
+ (newCfg.Admin == nil ||
+ newCfg.Admin.Config == nil ||
+ newCfg.Admin.Config.Persist == nil ||
+ *newCfg.Admin.Config.Persist) {
+ dir := filepath.Dir(ConfigAutosavePath)
+ err := os.MkdirAll(dir, 0o700)
+ if err != nil {
+ Log().Error("unable to create folder for config autosave",
+ zap.String("dir", dir),
+ zap.Error(err))
+ } else {
+ err := os.WriteFile(ConfigAutosavePath, cfgJSON, 0o600)
+ if err == nil {
+ Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath))
+ } else {
+ Log().Error("unable to autosave config",
+ zap.String("file", ConfigAutosavePath),
+ zap.Error(err))
+ }
+ }
+ }
-// UDPServer is a type that can listen and serve packets.
-// A UDPServer must associate with exactly zero or one net.PacketConns.
-type UDPServer interface {
- // ListenPacket starts listening by creating a new packetconn
- // and returning it. It does not start accepting connections.
- // TCP-only servers may leave this method blank and return
- // (nil, nil).
- ListenPacket() (net.PacketConn, error)
-
- // ServePacket starts serving using the provided packetconn.
- // ServePacket must start the server loop nearly immediately,
- // or at least not return any errors before the server
- // loop begins. ServePacket blocks indefinitely, or in other
- // words, until the server is stopped. For TCP-only servers,
- // this method can be a no-op that returns nil.
- ServePacket(net.PacketConn) error
+ return nil
}
-// Server is a type that can listen and serve. It supports both
-// TCP and UDP, although the UDPServer interface can be used
-// for more than just UDP.
+// run runs newCfg and starts all its apps if
+// start is true. If any errors happen, cleanup
+// is performed if any modules were provisioned;
+// apps that were started already will be stopped,
+// so this function should not leak resources if
+// an error is returned. However, if no error is
+// returned and start == false, you should cancel
+// the config if you are not going to start it,
+// so that each provisioned module will be
+// cleaned up.
//
-// If the server uses TCP, it should implement TCPServer completely.
-// If it uses UDP or some other protocol, it should implement
-// UDPServer completely. If it uses both, both interfaces should be
-// fully implemented. Any unimplemented methods should be made as
-// no-ops that simply return nil values.
-type Server interface {
- TCPServer
- UDPServer
-}
-
-// Stopper is a type that can stop serving. The stop
-// does not necessarily have to be graceful.
-type Stopper interface {
- // Stop stops the server. It blocks until the
- // server is completely stopped.
- Stop() error
-}
+// This is a low-level function; most callers
+// will want to use Run instead, which also
+// updates the config's raw state.
+func run(newCfg *Config, start bool) (Context, error) {
+ ctx, err := provisionContext(newCfg, start)
+ if err != nil {
+ globalMetrics.configSuccess.Set(0)
+ return ctx, err
+ }
-// GracefulServer is a Server and Stopper, the stopping
-// of which is graceful (whatever that means for the kind
-// of server being implemented). It must be able to return
-// the address it is configured to listen on so that its
-// listener can be paired with it upon graceful restarts.
-// The net.Listener that a GracefulServer creates must
-// implement the Listener interface for restarts to be
-// graceful (assuming the listener is for TCP).
-type GracefulServer interface {
- Server
- Stopper
-
- // Address returns the address the server should
- // listen on; it is used to pair the server to
- // its listener during a graceful/zero-downtime
- // restart. Thus when implementing this method,
- // you must not access a listener to get the
- // address; you must store the address the
- // server is to serve on some other way.
- Address() string
-}
+ if !start {
+ return ctx, nil
+ }
-// Listener is a net.Listener with an underlying file descriptor.
-// A server's listener should implement this interface if it is
-// to support zero-downtime reloads.
-type Listener interface {
- net.Listener
- File() (*os.File, error)
-}
+ // Provision any admin routers which may need to access
+ // some of the other apps at runtime
+ err = ctx.cfg.Admin.provisionAdminRouters(ctx)
+ if err != nil {
+ globalMetrics.configSuccess.Set(0)
+ return ctx, err
+ }
-// PacketConn is a net.PacketConn with an underlying file descriptor.
-// A server's packetconn should implement this interface if it is
-// to support zero-downtime reloads (in sofar this holds true for datagram
-// connections).
-type PacketConn interface {
- net.PacketConn
- File() (*os.File, error)
+	// Start the apps; if any fails, stop the already-started ones so nothing leaks
+	err = func() error {
+		started := make([]string, 0, len(ctx.cfg.apps))
+		for name, a := range ctx.cfg.apps {
+			err := a.Start()
+			if err != nil {
+				// an app failed to start, so we need to stop
+				// all other apps that were already started
+				for _, otherAppName := range started {
+					err2 := ctx.cfg.apps[otherAppName].Stop()
+					if err2 != nil {
+						err = fmt.Errorf("%v; additionally, aborting app %s: %v",
+							err, otherAppName, err2)
+					}
+				}
+				return fmt.Errorf("%s app module: start: %v", name, err)
+			}
+			started = append(started, name)
+		}
+		return nil
+	}()
+	if err != nil {
+		globalMetrics.configSuccess.Set(0)
+		return ctx, err
+	}
+ globalMetrics.configSuccess.Set(1)
+ globalMetrics.configSuccessTime.SetToCurrentTime()
+ // now that the user's config is running, finish setting up anything else,
+ // such as remote admin endpoint, config loader, etc.
+ return ctx, finishSettingUp(ctx, ctx.cfg)
}
-// AfterStartup is an interface that can be implemented
-// by a server type that wants to run some code after all
-// servers for the same Instance have started.
-type AfterStartup interface {
- OnStartupComplete()
-}
+// provisionContext creates a new context from the given configuration and provisions
+// storage and apps.
+// If `newCfg` is nil a new empty configuration will be created.
+// If `replaceAdminServer` is true any currently active admin server will be replaced
+// with a new admin server based on the provided configuration.
+func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error) {
+ // because we will need to roll back any state
+ // modifications if this function errors, we
+ // keep a single error value and scope all
+ // sub-operations to their own functions to
+ // ensure this error value does not get
+ // overridden or missed when it should have
+ // been set by a short assignment
+ var err error
+
+ if newCfg == nil {
+ newCfg = new(Config)
+ }
-// LoadCaddyfile loads a Caddyfile by calling the plugged in
-// Caddyfile loader methods. An error is returned if more than
-// one loader returns a non-nil Caddyfile input. If no loaders
-// load a Caddyfile, the default loader is used. If no default
-// loader is registered or it returns nil, the server type's
-// default Caddyfile is loaded. If the server type does not
-// specify any default Caddyfile value, then an empty Caddyfile
-// is returned. Consequently, this function never returns a nil
-// value as long as there are no errors.
-func LoadCaddyfile(serverType string) (Input, error) {
- // If we are finishing an upgrade, we must obtain the Caddyfile
- // from our parent process, regardless of configured loaders.
- if IsUpgrade() {
- err := gob.NewDecoder(os.Stdin).Decode(&loadedGob)
+ // create a context within which to load
+ // modules - essentially our new config's
+ // execution environment; be sure that
+ // cleanup occurs when we return if there
+ // was an error; if no error, it will get
+ // cleaned up on next config cycle
+ ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg})
+ defer func() {
if err != nil {
- return nil, err
+ globalMetrics.configSuccess.Set(0)
+ // if there were any errors during startup,
+ // we should cancel the new context we created
+ // since the associated config won't be used;
+ // this will cause all modules that were newly
+ // provisioned to clean themselves up
+ cancel()
+
+ // also undo any other state changes we made
+ if currentCtx.cfg != nil {
+ certmagic.Default.Storage = currentCtx.cfg.storage
+ }
}
- return loadedGob.Caddyfile, nil
- }
+ }()
+ newCfg.cancelFunc = cancel // clean up later
- // Ask plugged-in loaders for a Caddyfile
- cdyfile, err := loadCaddyfileInput(serverType)
- if err != nil {
- return nil, err
+ // set up logging before anything bad happens
+ if newCfg.Logging == nil {
+ newCfg.Logging = new(Logging)
}
-
- // Otherwise revert to default
- if cdyfile == nil {
- cdyfile = DefaultInput(serverType)
+ err = newCfg.Logging.openLogs(ctx)
+ if err != nil {
+ return ctx, err
}
- // Still nil? Geez.
- if cdyfile == nil {
- cdyfile = CaddyfileInput{ServerTypeName: serverType}
+ // start the admin endpoint (and stop any prior one)
+ if replaceAdminServer {
+ err = replaceLocalAdminServer(newCfg, ctx)
+ if err != nil {
+ return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
+ }
}
- return cdyfile, nil
-}
+ // create the new filesystem map
+ newCfg.filesystems = &filesystems.FilesystemMap{}
-// Wait blocks until all of i's servers have stopped.
-func (i *Instance) Wait() {
- i.wg.Wait()
-}
+ // prepare the new config for use
+ newCfg.apps = make(map[string]App)
-// CaddyfileFromPipe loads the Caddyfile input from f if f is
-// not interactive input. f is assumed to be a pipe or stream,
-// such as os.Stdin. If f is not a pipe, no error is returned
-// but the Input value will be nil. An error is only returned
-// if there was an error reading the pipe, even if the length
-// of what was read is 0.
-func CaddyfileFromPipe(f *os.File, serverType string) (Input, error) {
- fi, err := f.Stat()
- if err == nil && fi.Mode()&os.ModeCharDevice == 0 {
- // Note that a non-nil error is not a problem. Windows
- // will not create a stdin if there is no pipe, which
- // produces an error when calling Stat(). But Unix will
- // make one either way, which is why we also check that
- // bitmask.
- // NOTE: Reading from stdin after this fails (e.g. for the let's encrypt email address) (OS X)
- confBody, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, err
+ // set up global storage and make it CertMagic's default storage, too
+ err = func() error {
+ if newCfg.StorageRaw != nil {
+ val, err := ctx.LoadModule(newCfg, "StorageRaw")
+ if err != nil {
+ return fmt.Errorf("loading storage module: %v", err)
+ }
+ stor, err := val.(StorageConverter).CertMagicStorage()
+ if err != nil {
+ return fmt.Errorf("creating storage value: %v", err)
+ }
+ newCfg.storage = stor
}
- return CaddyfileInput{
- Contents: confBody,
- Filepath: f.Name(),
- ServerTypeName: serverType,
- }, nil
- }
- // not having input from the pipe is not itself an error,
- // just means no input to return.
- return nil, nil
-}
-
-// Caddyfile returns the Caddyfile used to create i.
-func (i *Instance) Caddyfile() Input {
- return i.caddyfileInput
-}
+ if newCfg.storage == nil {
+ newCfg.storage = DefaultStorage
+ }
+ certmagic.Default.Storage = newCfg.storage
-// Start starts Caddy with the given Caddyfile.
-//
-// This function blocks until all the servers are listening.
-func Start(cdyfile Input) (*Instance, error) {
- inst := &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
- err := startWithListenerFds(cdyfile, inst, nil)
+ return nil
+ }()
if err != nil {
- return inst, err
+ return ctx, err
}
- signalSuccessToParent()
- if pidErr := writePidFile(); pidErr != nil {
- log.Printf("[ERROR] Could not write pidfile: %v", pidErr)
- }
- return inst, nil
+
+ // Load and Provision each app and their submodules
+ err = func() error {
+ for appName := range newCfg.AppsRaw {
+ if _, err := ctx.App(appName); err != nil {
+ return err
+ }
+ }
+ return nil
+ }()
+ return ctx, err
}
-func startWithListenerFds(cdyfile Input, inst *Instance, restartFds map[string]restartTriple) error {
- if cdyfile == nil {
- cdyfile = CaddyfileInput{}
- }
+// ProvisionContext creates a new context from the given configuration and provisions
+// storage and app modules.
+// This function is intended for testing and advanced use cases only; typically, `Run` should
+// be used to ensure a fully functional caddy instance.
+// EXPERIMENTAL: While this is public, the interface and implementation details of this function may change.
+func ProvisionContext(newCfg *Config) (Context, error) {
+	return provisionContext(newCfg, false)
+}
- err := ValidateAndExecuteDirectives(cdyfile, inst, false)
+// finishSettingUp should be run after all apps have successfully started.
+func finishSettingUp(ctx Context, cfg *Config) error {
+	// establish this server's identity (only after apps are loaded
+	// so that cert management of this endpoint doesn't prevent user's
+	// servers from starting which likely also use HTTP/HTTPS ports;
+	// but before remote management which may depend on these creds)
+	err := manageIdentity(ctx, cfg)
 	if err != nil {
-		return err
+		return fmt.Errorf("provisioning identity management: %v", err)
 	}
- slist, err := inst.context.MakeServers()
+ // replace any remote admin endpoint
+ err = replaceRemoteAdminServer(ctx, cfg)
if err != nil {
- return err
+ return fmt.Errorf("provisioning remote admin endpoint: %v", err)
}
- // run startup callbacks
- if !IsUpgrade() && restartFds == nil {
- // first startup means not a restart or upgrade
- for _, firstStartupFunc := range inst.onFirstStartup {
- err := firstStartupFunc()
- if err != nil {
- return err
- }
- }
- }
- for _, startupFunc := range inst.onStartup {
- err := startupFunc()
+ // if dynamic config is requested, set that up and run it
+ if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil {
+ val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw")
if err != nil {
- return err
+ return fmt.Errorf("loading config loader module: %s", err)
}
- }
- err = startServers(slist, inst, restartFds)
- if err != nil {
- return err
- }
-
- instancesMu.Lock()
- instances = append(instances, inst)
- instancesMu.Unlock()
+ logger := Log().Named("config_loader").With(
+ zap.String("module", val.(Module).CaddyModule().ID.Name()),
+ zap.Int("load_delay", int(cfg.Admin.Config.LoadDelay)))
- // run any AfterStartup callbacks if this is not
- // part of a restart; then show file descriptor notice
- if restartFds == nil {
- for _, srvln := range inst.servers {
- if srv, ok := srvln.server.(AfterStartup); ok {
- srv.OnStartupComplete()
+ runLoadedConfig := func(config []byte) error {
+ logger.Info("applying dynamically-loaded config")
+ err := changeConfig(http.MethodPost, "/"+rawConfigKey, config, "", false)
+ if errors.Is(err, errSameConfig) {
+ return err
+ }
+ if err != nil {
+ logger.Error("failed to run dynamically-loaded config", zap.Error(err))
+ return err
}
+ logger.Info("successfully applied dynamically-loaded config")
+ return nil
}
- if !Quiet {
- for _, srvln := range inst.servers {
- if !IsLoopback(srvln.listener.Addr().String()) {
- checkFdlimit()
+
+ if cfg.Admin.Config.LoadDelay > 0 {
+ go func() {
+ // the loop is here to iterate ONLY if there is an error, a no-op config load,
+ // or an unchanged config; in which case we simply wait the delay and try again
+ for {
+ timer := time.NewTimer(time.Duration(cfg.Admin.Config.LoadDelay))
+ select {
+ case <-timer.C:
+ loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
+ if err != nil {
+ logger.Error("failed loading dynamic config; will retry", zap.Error(err))
+ continue
+ }
+ if loadedConfig == nil {
+ logger.Info("dynamically-loaded config was nil; will retry")
+ continue
+ }
+ err = runLoadedConfig(loadedConfig)
+ if errors.Is(err, errSameConfig) {
+ logger.Info("dynamically-loaded config was unchanged; will retry")
+ continue
+ }
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ logger.Info("stopping dynamic config loading")
+ }
break
}
+ }()
+ } else {
+ // if no LoadDelay is provided, will load config synchronously
+ loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("loading dynamic config from %T: %v", val, err)
}
+ // do this in a goroutine so current config can finish being loaded; otherwise deadlock
+ go func() { _ = runLoadedConfig(loadedConfig) }()
}
}
- mu.Lock()
- started = true
- mu.Unlock()
-
return nil
}
-// ValidateAndExecuteDirectives will load the server blocks from cdyfile
-// by parsing it, then execute the directives configured by it and store
-// the resulting server blocks into inst. If justValidate is true, parse
-// callbacks will not be executed between directives, since the purpose
-// is only to check the input for valid syntax.
-func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bool) error {
- // If parsing only inst will be nil, create an instance for this function call only.
- if justValidate {
- inst = &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup)}
- }
+// ConfigLoader is a type that can load a Caddy config. If
+// the returned config is non-nil, it must be valid Caddy JSON;
+// a nil config, or a non-nil error, is considered a no-op
+// load, and the load may be retried later.
+type ConfigLoader interface {
+	LoadConfig(Context) ([]byte, error)
+}
- stypeName := cdyfile.ServerType()
+// Stop stops running the current configuration.
+// It is the antithesis of Run(). This function
+// will log any errors that occur during the
+// stopping of individual apps and continue to
+// stop the others. Stop should only be called
+// if not replacing with a new config.
+func Stop() error {
+ currentCtxMu.RLock()
+ ctx := currentCtx
+ currentCtxMu.RUnlock()
- stype, err := getServerType(stypeName)
- if err != nil {
- return err
- }
+ rawCfgMu.Lock()
+ unsyncedStop(ctx)
- inst.caddyfileInput = cdyfile
+ currentCtxMu.Lock()
+ currentCtx = Context{}
+ currentCtxMu.Unlock()
- sblocks, err := loadServerBlocks(stypeName, cdyfile.Path(), bytes.NewReader(cdyfile.Body()))
- if err != nil {
- return err
- }
+ rawCfgJSON = nil
+ rawCfgIndex = nil
+ rawCfg[rawConfigKey] = nil
+ rawCfgMu.Unlock()
- inst.context = stype.NewContext()
- if inst.context == nil {
- return fmt.Errorf("server type %s produced a nil Context", stypeName)
- }
+ return nil
+}
- sblocks, err = inst.context.InspectServerBlocks(cdyfile.Path(), sblocks)
- if err != nil {
- return err
+// unsyncedStop stops ctx from running, but has
+// no locking around ctx. It is a no-op if ctx has a
+// nil cfg. If any app returns an error when stopping,
+// it is logged and the function continues stopping
+// the next app. This function assumes all apps in
+// ctx were successfully started first.
+//
+// A lock on rawCfgMu is required, even though this
+// function does not access rawCfg, that lock
+// synchronizes the stop/start of apps.
+func unsyncedStop(ctx Context) {
+ if ctx.cfg == nil {
+ return
}
- err = executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate)
- if err != nil {
- return err
+ // stop each app
+ for name, a := range ctx.cfg.apps {
+ err := a.Stop()
+ if err != nil {
+ log.Printf("[ERROR] stop %s: %v", name, err)
+ }
}
- return nil
+ // clean up all modules
+ ctx.cfg.cancelFunc()
}
-func executeDirectives(inst *Instance, filename string,
- directives []string, sblocks []caddyfile.ServerBlock, justValidate bool) error {
- // map of server block ID to map of directive name to whatever.
- storages := make(map[int]map[string]interface{})
-
- // It is crucial that directives are executed in the proper order.
- // We loop with the directives on the outer loop so we execute
- // a directive for all server blocks before going to the next directive.
- // This is important mainly due to the parsing callbacks (below).
- for _, dir := range directives {
- for i, sb := range sblocks {
- var once sync.Once
- if _, ok := storages[i]; !ok {
- storages[i] = make(map[string]interface{})
- }
+// Validate loads, provisions, and validates
+// cfg, but does not start running it.
+func Validate(cfg *Config) error {
+ _, err := run(cfg, false)
+ if err == nil {
+ cfg.cancelFunc() // call Cleanup on all modules
+ }
+ return err
+}
- for j, key := range sb.Keys {
- // Execute directive if it is in the server block
- if tokens, ok := sb.Tokens[dir]; ok {
- controller := &Controller{
- instance: inst,
- Key: key,
- Dispenser: caddyfile.NewDispenserTokens(filename, tokens),
- OncePerServerBlock: func(f func() error) error {
- var err error
- once.Do(func() {
- err = f()
- })
- return err
- },
- ServerBlockIndex: i,
- ServerBlockKeyIndex: j,
- ServerBlockKeys: sb.Keys,
- ServerBlockStorage: storages[i][dir],
- }
+// exitProcess exits the process as gracefully as possible,
+// but it always exits, even if there are errors doing so.
+// It stops all apps, cleans up external locks, removes any
+// PID file, and shuts down admin endpoint(s) in a goroutine.
+// Errors are logged along the way, and an appropriate exit
+// code is emitted.
+func exitProcess(ctx context.Context, logger *zap.Logger) {
+ // let the rest of the program know we're quitting; only do it once
+ if !atomic.CompareAndSwapInt32(exiting, 0, 1) {
+ return
+ }
- setup, err := DirectiveAction(inst.serverType, dir)
- if err != nil {
- return err
- }
+ // give the OS or service/process manager our 2 weeks' notice: we quit
+ if err := notify.Stopping(); err != nil {
+ Log().Error("unable to notify service manager of stopping state", zap.Error(err))
+ }
- err = setup(controller)
- if err != nil {
- return err
- }
+ if logger == nil {
+ logger = Log()
+ }
+ logger.Warn("exiting; byeee!! 👋")
- storages[i][dir] = controller.ServerBlockStorage // persist for this server block
- }
- }
- }
+ exitCode := ExitCodeSuccess
+ lastContext := ActiveContext()
- if !justValidate {
- // See if there are any callbacks to execute after this directive
- if allCallbacks, ok := parsingCallbacks[inst.serverType]; ok {
- callbacks := allCallbacks[dir]
- for _, callback := range callbacks {
- if err := callback(inst.context); err != nil {
- return err
- }
- }
- }
- }
+ // stop all apps
+ if err := Stop(); err != nil {
+ logger.Error("failed to stop apps", zap.Error(err))
+ exitCode = ExitCodeFailedQuit
}
- return nil
-}
+ // clean up certmagic locks
+ certmagic.CleanUpOwnLocks(ctx, logger)
-func startServers(serverList []Server, inst *Instance, restartFds map[string]restartTriple) error {
- errChan := make(chan error, len(serverList))
-
- for _, s := range serverList {
- var (
- ln net.Listener
- pc net.PacketConn
- err error
- )
-
- // if performing an upgrade, obtain listener file descriptors
- // from parent process
- if IsUpgrade() {
- if gs, ok := s.(GracefulServer); ok {
- addr := gs.Address()
- if fdIndex, ok := loadedGob.ListenerFds["tcp"+addr]; ok {
- file := os.NewFile(fdIndex, "")
- ln, err = net.FileListener(file)
- file.Close()
- if err != nil {
- return err
- }
- }
- if fdIndex, ok := loadedGob.ListenerFds["udp"+addr]; ok {
- file := os.NewFile(fdIndex, "")
- pc, err = net.FilePacketConn(file)
- file.Close()
- if err != nil {
- return err
- }
- }
- }
+	// remove the PID file, if any, so a stale one isn't left behind
+	if pidfile != "" {
+		err := os.Remove(pidfile)
+		if err != nil {
+			logger.Error("cleaning up PID file",
+				zap.String("pidfile", pidfile),
+				zap.Error(err))
+			exitCode = ExitCodeFailedQuit
+		}
+	}
- // If this is a reload and s is a GracefulServer,
- // reuse the listener for a graceful restart.
- if gs, ok := s.(GracefulServer); ok && restartFds != nil {
- addr := gs.Address()
- if old, ok := restartFds[addr]; ok {
- // listener
- if old.listener != nil {
- file, err := old.listener.File()
- if err != nil {
- return err
- }
- ln, err = net.FileListener(file)
- if err != nil {
- return err
- }
- file.Close()
- }
- // packetconn
- if old.packet != nil {
- file, err := old.packet.File()
- if err != nil {
- return err
- }
- pc, err = net.FilePacketConn(file)
- if err != nil {
- return err
- }
- file.Close()
- }
+ // execute any process-exit callbacks
+ for _, exitFunc := range lastContext.exitFuncs {
+ exitFunc(ctx)
+ }
+ exitFuncsMu.Lock()
+ for _, exitFunc := range exitFuncs {
+ exitFunc(ctx)
+ }
+ exitFuncsMu.Unlock()
+
+ // shut down admin endpoint(s) in goroutines so that
+ // if this function was called from an admin handler,
+ // it has a chance to return gracefully
+ // use goroutine so that we can finish responding to API request
+ go func() {
+ defer func() {
+ logger = logger.With(zap.Int("exit_code", exitCode))
+ if exitCode == ExitCodeSuccess {
+ logger.Info("shutdown complete")
+ } else {
+ logger.Error("unclean shutdown")
}
- }
+ os.Exit(exitCode)
+ }()
- if ln == nil {
- ln, err = s.Listen()
+ if remoteAdminServer != nil {
+ err := stopAdminServer(remoteAdminServer)
if err != nil {
- return err
+ exitCode = ExitCodeFailedQuit
+ logger.Error("failed to stop remote admin server gracefully", zap.Error(err))
}
}
- if pc == nil {
- pc, err = s.ListenPacket()
+ if localAdminServer != nil {
+ err := stopAdminServer(localAdminServer)
if err != nil {
- return err
+ exitCode = ExitCodeFailedQuit
+ logger.Error("failed to stop local admin server gracefully", zap.Error(err))
}
}
+ }()
+}
- inst.wg.Add(2)
- go func(s Server, ln net.Listener, pc net.PacketConn, inst *Instance) {
- defer inst.wg.Done()
+var exiting = new(int32) // accessed atomically
- go func() {
- errChan <- s.Serve(ln)
- defer inst.wg.Done()
- }()
- errChan <- s.ServePacket(pc)
- }(s, ln, pc, inst)
+// Exiting returns true if the process is exiting.
+// EXPERIMENTAL API: subject to change or removal.
+func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }
- inst.servers = append(inst.servers, ServerListener{server: s, listener: ln, packet: pc})
- }
+// OnExit registers a callback to invoke during process exit.
+// This registration is PROCESS-GLOBAL, meaning that each
+// function should only be registered once forever, NOT once
+// per config load (etc).
+//
+// EXPERIMENTAL API: subject to change or removal.
+func OnExit(f func(context.Context)) {
+ exitFuncsMu.Lock()
+ exitFuncs = append(exitFuncs, f)
+ exitFuncsMu.Unlock()
+}
- // Log errors that may be returned from Serve() calls,
- // these errors should only be occurring in the server loop.
- go func() {
- for err := range errChan {
- if err == nil {
- continue
- }
- if strings.Contains(err.Error(), "use of closed network connection") {
- // this error is normal when closing the listener
- continue
- }
- log.Println(err)
- }
- }()
+var (
+ exitFuncs []func(context.Context)
+ exitFuncsMu sync.Mutex
+)
- return nil
-}
+// Duration can be an integer or a string. An integer is
+// interpreted as nanoseconds. If a string, it is a Go
+// time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
+// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`.
+type Duration time.Duration
-func getServerType(serverType string) (ServerType, error) {
- stype, ok := serverTypes[serverType]
- if ok {
- return stype, nil
+// UnmarshalJSON satisfies json.Unmarshaler.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ return io.EOF
}
- if len(serverTypes) == 0 {
- return ServerType{}, fmt.Errorf("no server types plugged in")
+ var dur time.Duration
+ var err error
+ if b[0] == byte('"') && b[len(b)-1] == byte('"') {
+ dur, err = ParseDuration(strings.Trim(string(b), `"`))
+ } else {
+ err = json.Unmarshal(b, &dur)
}
- if serverType == "" {
- if len(serverTypes) == 1 {
- for _, stype := range serverTypes {
- return stype, nil
- }
- }
- return ServerType{}, fmt.Errorf("multiple server types available; must choose one")
- }
- return ServerType{}, fmt.Errorf("unknown server type '%s'", serverType)
+ *d = Duration(dur)
+ return err
}
-func loadServerBlocks(serverType, filename string, input io.Reader) ([]caddyfile.ServerBlock, error) {
- validDirectives := ValidDirectives(serverType)
- serverBlocks, err := caddyfile.Parse(filename, input, validDirectives)
- if err != nil {
- return nil, err
+// ParseDuration parses a duration string, adding
+// support for the "d" unit meaning number of days,
+// where a day is assumed to be 24h. The maximum
+// input string length is 1024.
+func ParseDuration(s string) (time.Duration, error) {
+ if len(s) > 1024 {
+ return 0, fmt.Errorf("parsing duration: input string too long")
}
- if len(serverBlocks) == 0 && serverTypes[serverType].DefaultInput != nil {
- newInput := serverTypes[serverType].DefaultInput()
- serverBlocks, err = caddyfile.Parse(newInput.Path(),
- bytes.NewReader(newInput.Body()), validDirectives)
- if err != nil {
- return nil, err
+ var inNumber bool
+ var numStart int
+ for i := 0; i < len(s); i++ {
+ ch := s[i]
+ if ch == 'd' {
+ daysStr := s[numStart:i]
+ days, err := strconv.ParseFloat(daysStr, 64)
+ if err != nil {
+ return 0, err
+ }
+ hours := days * 24.0
+ hoursStr := strconv.FormatFloat(hours, 'f', -1, 64)
+ s = s[:numStart] + hoursStr + "h" + s[i+1:]
+ i--
+ continue
+ }
+ if !inNumber {
+ numStart = i
}
+ inNumber = (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == '+'
}
- return serverBlocks, nil
+ return time.ParseDuration(s)
}
-// Stop stops ALL servers. It blocks until they are all stopped.
-// It does NOT execute shutdown callbacks, and it deletes all
-// instances after stopping is completed. Do not re-use any
-// references to old instances after calling Stop.
-func Stop() error {
- // This awkward for loop is to avoid a deadlock since
- // inst.Stop() also acquires the instancesMu lock.
- for {
- instancesMu.Lock()
- if len(instances) == 0 {
- break
+// InstanceID returns the UUID for this instance, and generates one if it
+// does not already exist. The UUID is stored in the local data directory,
+// regardless of storage configuration, since each instance is intended to
+// have its own unique ID.
+func InstanceID() (uuid.UUID, error) {
+ appDataDir := AppDataDir()
+ uuidFilePath := filepath.Join(appDataDir, "instance.uuid")
+ uuidFileBytes, err := os.ReadFile(uuidFilePath)
+ if errors.Is(err, fs.ErrNotExist) {
+ uuid, err := uuid.NewRandom()
+ if err != nil {
+ return uuid, err
}
- inst := instances[0]
- instancesMu.Unlock()
- if err := inst.Stop(); err != nil {
- log.Printf("[ERROR] Stopping %s: %v", inst.serverType, err)
+ err = os.MkdirAll(appDataDir, 0o700)
+ if err != nil {
+ return uuid, err
}
+ err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600)
+ return uuid, err
+ } else if err != nil {
+ return [16]byte{}, err
}
- return nil
+ return uuid.ParseBytes(uuidFileBytes)
}
-// IsLoopback returns true if the hostname of addr looks
-// explicitly like a common local hostname. addr must only
-// be a host or a host:port combination.
-func IsLoopback(addr string) bool {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- host = addr // happens if the addr is just a hostname
- }
- return host == "localhost" ||
- strings.Trim(host, "[]") == "::1" ||
- strings.HasPrefix(host, "127.")
-}
-
-// IsInternal returns true if the IP of addr
-// belongs to a private network IP range. addr must only
-// be an IP or an IP:port combination.
-// Loopback addresses are considered false.
-func IsInternal(addr string) bool {
- privateNetworks := []string{
- "10.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "fc00::/7",
- }
-
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- host = addr // happens if the addr is just a hostname, missing port
- // if we encounter an error, the brackets need to be stripped
- // because SplitHostPort didn't do it for us
- host = strings.Trim(host, "[]")
+// CustomVersion is an optional string that overrides Caddy's
+// reported version. It can be helpful when downstream packagers
+// need to manually set Caddy's version. If no other version
+// information is available, the short form version (see
+// Version()) will be set to CustomVersion, and the full version
+// will include CustomVersion at the beginning.
+//
+// Set this variable during `go build` with `-ldflags`:
+//
+// -ldflags '-X github.com/caddyserver/caddy/v2.CustomVersion=v2.6.2'
+//
+// for example.
+var CustomVersion string
+
+// Version returns the Caddy version in a simple/short form, and
+// a full version string. The short form will not have spaces and
+// is intended for User-Agent strings and similar, but may be
+// omitting valuable information. Note that Caddy must be compiled
+// in a special way to properly embed complete version information.
+// First this function tries to get the version from the embedded
+// build info provided by go.mod dependencies; then it tries to
+// get info from embedded VCS information, which requires having
+// built Caddy from a git repository. If no version is available,
+// this function returns "(devel)" because Go uses that, but for
+// the simple form we change it to "unknown". If still no version
+// is available (e.g. no VCS repo), then it will use CustomVersion;
+// CustomVersion is always prepended to the full version string.
+//
+// See relevant Go issues: https://github.com/golang/go/issues/29228
+// and https://github.com/golang/go/issues/50603.
+//
+// This function is experimental and subject to change or removal.
+func Version() (simple, full string) {
+ // the currently-recommended way to build Caddy involves
+ // building it as a dependency so we can extract version
+ // information from go.mod tooling; once the upstream
+ // Go issues are fixed, we should just be able to use
+ // bi.Main... hopefully.
+ var module *debug.Module
+ bi, ok := debug.ReadBuildInfo()
+ if !ok {
+ if CustomVersion != "" {
+ full = CustomVersion
+ simple = CustomVersion
+ return
+ }
+ full = "unknown"
+ simple = "unknown"
+ return
}
- ip := net.ParseIP(host)
- if ip == nil {
- return false
+ // find the Caddy module in the dependency list
+ for _, dep := range bi.Deps {
+ if dep.Path == ImportPath {
+ module = dep
+ break
+ }
}
- for _, privateNetwork := range privateNetworks {
- _, ipnet, _ := net.ParseCIDR(privateNetwork)
- if ipnet.Contains(ip) {
- return true
+ if module != nil {
+ simple, full = module.Version, module.Version
+ if module.Sum != "" {
+ full += " " + module.Sum
+ }
+ if module.Replace != nil {
+ full += " => " + module.Replace.Path
+ if module.Replace.Version != "" {
+ simple = module.Replace.Version + "_custom"
+ full += "@" + module.Replace.Version
+ }
+ if module.Replace.Sum != "" {
+ full += " " + module.Replace.Sum
+ }
}
}
- return false
-}
-
-// Started returns true if at least one instance has been
-// started by this package. It never gets reset to false
-// once it is set to true.
-func Started() bool {
- mu.Lock()
- defer mu.Unlock()
- return started
-}
-// CaddyfileInput represents a Caddyfile as input
-// and is simply a convenient way to implement
-// the Input interface.
-type CaddyfileInput struct {
- Filepath string
- Contents []byte
- ServerTypeName string
-}
+ if full == "" {
+ var vcsRevision string
+ var vcsTime time.Time
+ var vcsModified bool
+ for _, setting := range bi.Settings {
+ switch setting.Key {
+ case "vcs.revision":
+ vcsRevision = setting.Value
+ case "vcs.time":
+ vcsTime, _ = time.Parse(time.RFC3339, setting.Value)
+ case "vcs.modified":
+ vcsModified, _ = strconv.ParseBool(setting.Value)
+ }
+ }
-// Body returns c.Contents.
-func (c CaddyfileInput) Body() []byte { return c.Contents }
+ if vcsRevision != "" {
+ var modified string
+ if vcsModified {
+ modified = "+modified"
+ }
+ full = fmt.Sprintf("%s%s (%s)", vcsRevision, modified, vcsTime.Format(time.RFC822))
+ simple = vcsRevision
-// Path returns c.Filepath.
-func (c CaddyfileInput) Path() string { return c.Filepath }
+ // use short checksum for simple, if hex-only
+ if _, err := hex.DecodeString(simple); err == nil {
+ simple = simple[:8]
+ }
-// ServerType returns c.ServerType.
-func (c CaddyfileInput) ServerType() string { return c.ServerTypeName }
+ // append date to simple since it can be convenient
+ // to know the commit date as part of the version
+ if !vcsTime.IsZero() {
+ simple += "-" + vcsTime.Format("20060102")
+ }
+ }
+ }
-// Input represents a Caddyfile; its contents and file path
-// (which should include the file name at the end of the path).
-// If path does not apply (e.g. piped input) you may use
-// any understandable value. The path is mainly used for logging,
-// error messages, and debugging.
-type Input interface {
- // Gets the Caddyfile contents
- Body() []byte
+ if full == "" {
+ if CustomVersion != "" {
+ full = CustomVersion
+ } else {
+ full = "unknown"
+ }
+ } else if CustomVersion != "" {
+ full = CustomVersion + " " + full
+ }
- // Gets the path to the origin file
- Path() string
+ if simple == "" || simple == "(devel)" {
+ if CustomVersion != "" {
+ simple = CustomVersion
+ } else {
+ simple = "unknown"
+ }
+ }
- // The type of server this input is intended for
- ServerType() string
+ return
}
-// DefaultInput returns the default Caddyfile input
-// to use when it is otherwise empty or missing.
-// It uses the default host and port (depends on
-// host, e.g. localhost is 2015, otherwise 443) and
-// root.
-func DefaultInput(serverType string) Input {
- if _, ok := serverTypes[serverType]; !ok {
- return nil
- }
- if serverTypes[serverType].DefaultInput == nil {
- return nil
- }
- return serverTypes[serverType].DefaultInput()
+// ActiveContext returns the currently-active context.
+// This function is experimental and might be changed
+// or removed in the future.
+func ActiveContext() Context {
+ currentCtxMu.RLock()
+ defer currentCtxMu.RUnlock()
+ return currentCtx
}
-// writePidFile writes the process ID to the file at PidFile.
-// It does nothing if PidFile is not set.
-func writePidFile() error {
- if PidFile == "" {
- return nil
+// CtxKey is a value type for use with context.WithValue.
+type CtxKey string
+
+// This group of variables pertains to the current configuration.
+var (
+ // currentCtx is the root context for the currently-running
+ // configuration, which can be accessed through this value.
+ // If the Config contained in this value is not nil, then
+ // a config is currently active/running.
+ currentCtx Context
+ currentCtxMu sync.RWMutex
+
+ // rawCfg is the current, generic-decoded configuration;
+ // we initialize it as a map with one field ("config")
+ // to maintain parity with the API endpoint and to avoid
+ // the special case of having to access/mutate the variable
+ // directly without traversing into it.
+ rawCfg = map[string]any{
+ rawConfigKey: nil,
}
- pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
- return ioutil.WriteFile(PidFile, pid, 0644)
-}
-type restartTriple struct {
- server GracefulServer
- listener Listener
- packet PacketConn
-}
+ // rawCfgJSON is the JSON-encoded form of rawCfg. Keeping
+ // this around avoids an extra Marshal call during changes.
+ rawCfgJSON []byte
-var (
- // instances is the list of running Instances.
- instances []*Instance
+ // rawCfgIndex is the map of user-assigned ID to expanded
+ // path, for converting /id/ paths to /config/ paths.
+ rawCfgIndex map[string]string
- // instancesMu protects instances.
- instancesMu sync.Mutex
+ // rawCfgMu protects all the rawCfg fields and also
+ // essentially synchronizes config changes/reloads.
+ rawCfgMu sync.RWMutex
)
-var (
- // DefaultConfigFile is the name of the configuration file that is loaded
- // by default if no other file is specified.
- DefaultConfigFile = "Caddyfile"
-)
+// errSameConfig is returned if the new config is the same
+// as the old one. This isn't usually an actual, actionable
+// error; it's mostly a sentinel value.
+var errSameConfig = errors.New("config is unchanged")
-// CtxKey is a value type for use with context.WithValue.
-type CtxKey string
+// ImportPath is the package import path for Caddy core.
+// This identifier may be removed in the future.
+const ImportPath = "github.com/caddyserver/caddy/v2"
diff --git a/caddy/build.bash b/caddy/build.bash
deleted file mode 100755
index f55a98826bc..00000000000
--- a/caddy/build.bash
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-#
-# Caddy build script. Automates proper versioning.
-#
-# Usage:
-#
-# $ ./build.bash [output_filename] [git_repo]
-#
-# Outputs compiled program in current directory.
-# Default git repo is current directory.
-# Builds always take place from current directory.
-
-set -euo pipefail
-
-: ${output_filename:="${1:-}"}
-: ${output_filename:=""}
-
-: ${git_repo:="${2:-}"}
-: ${git_repo:="."}
-
-pkg=github.com/mholt/caddy/caddy/caddymain
-ldflags=()
-
-# Timestamp of build
-name="${pkg}.buildDate"
-value=$(date -u +"%a %b %d %H:%M:%S %Z %Y")
-ldflags+=("-X" "\"${name}=${value}\"")
-
-# Current tag, if HEAD is on a tag
-name="${pkg}.gitTag"
-set +e
-value="$(git -C "${git_repo}" describe --exact-match HEAD 2>/dev/null)"
-set -e
-ldflags+=("-X" "\"${name}=${value}\"")
-
-# Nearest tag on branch
-name="${pkg}.gitNearestTag"
-value="$(git -C "${git_repo}" describe --abbrev=0 --tags HEAD)"
-ldflags+=("-X" "\"${name}=${value}\"")
-
-# Commit SHA
-name="${pkg}.gitCommit"
-value="$(git -C "${git_repo}" rev-parse --short HEAD)"
-ldflags+=("-X" "\"${name}=${value}\"")
-
-# Summary of uncommitted changes
-name="${pkg}.gitShortStat"
-value="$(git -C "${git_repo}" diff-index --shortstat HEAD)"
-ldflags+=("-X" "\"${name}=${value}\"")
-
-# List of modified files
-name="${pkg}.gitFilesModified"
-value="$(git -C "${git_repo}" diff-index --name-only HEAD)"
-ldflags+=("-X" "\"${name}=${value}\"")
-
-go build -ldflags "${ldflags[*]}" -o "${output_filename}"
diff --git a/caddy/caddymain/run.go b/caddy/caddymain/run.go
deleted file mode 100644
index b88997192bc..00000000000
--- a/caddy/caddymain/run.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package caddymain
-
-import (
- "errors"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "runtime"
- "strconv"
- "strings"
-
- "gopkg.in/natefinch/lumberjack.v2"
-
- "github.com/xenolf/lego/acme"
-
- "github.com/mholt/caddy"
- // plug in the HTTP server type
- _ "github.com/mholt/caddy/caddyhttp"
-
- "github.com/mholt/caddy/caddytls"
- // This is where other plugins get plugged in (imported)
-)
-
-func init() {
- caddy.TrapSignals()
- setVersion()
-
- flag.BoolVar(&caddytls.Agreed, "agree", false, "Agree to the CA's Subscriber Agreement")
- flag.StringVar(&caddytls.DefaultCAUrl, "ca", "https://acme-v01.api.letsencrypt.org/directory", "URL to certificate authority's ACME server directory")
- flag.BoolVar(&caddytls.DisableHTTPChallenge, "disable-http-challenge", caddytls.DisableHTTPChallenge, "Disable the ACME HTTP challenge")
- flag.BoolVar(&caddytls.DisableTLSSNIChallenge, "disable-tls-sni-challenge", caddytls.DisableTLSSNIChallenge, "Disable the ACME TLS-SNI challenge")
- flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")")
- flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
- flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
- flag.StringVar(&caddytls.DefaultEmail, "email", "", "Default ACME CA account email address")
- flag.DurationVar(&acme.HTTPClient.Timeout, "catimeout", acme.HTTPClient.Timeout, "Default ACME CA HTTP timeout")
- flag.StringVar(&logfile, "log", "", "Process log file")
- flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
- flag.BoolVar(&caddy.Quiet, "quiet", false, "Quiet mode (no initialization output)")
- flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate")
- flag.StringVar(&serverType, "type", "http", "Type of server to run")
- flag.BoolVar(&version, "version", false, "Show version")
- flag.BoolVar(&validate, "validate", false, "Parse the Caddyfile but do not start the server")
-
- caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
- caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
-}
-
-// Run is Caddy's main() function.
-func Run() {
- flag.Parse()
-
- caddy.AppName = appName
- caddy.AppVersion = appVersion
- acme.UserAgent = appName + "/" + appVersion
-
- // Set up process log before anything bad happens
- switch logfile {
- case "stdout":
- log.SetOutput(os.Stdout)
- case "stderr":
- log.SetOutput(os.Stderr)
- case "":
- log.SetOutput(ioutil.Discard)
- default:
- log.SetOutput(&lumberjack.Logger{
- Filename: logfile,
- MaxSize: 100,
- MaxAge: 14,
- MaxBackups: 10,
- })
- }
-
- // Check for one-time actions
- if revoke != "" {
- err := caddytls.Revoke(revoke)
- if err != nil {
- mustLogFatalf("%v", err)
- }
- fmt.Printf("Revoked certificate for %s\n", revoke)
- os.Exit(0)
- }
- if version {
- fmt.Printf("%s %s\n", appName, appVersion)
- if devBuild && gitShortStat != "" {
- fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified)
- }
- os.Exit(0)
- }
- if plugins {
- fmt.Println(caddy.DescribePlugins())
- os.Exit(0)
- }
-
- // Set CPU cap
- err := setCPU(cpu)
- if err != nil {
- mustLogFatalf("%v", err)
- }
-
- // Executes Startup events
- caddy.EmitEvent(caddy.StartupEvent, nil)
-
- // Get Caddyfile input
- caddyfileinput, err := caddy.LoadCaddyfile(serverType)
- if err != nil {
- mustLogFatalf("%v", err)
- }
-
- if validate {
- err := caddy.ValidateAndExecuteDirectives(caddyfileinput, nil, true)
- if err != nil {
- mustLogFatalf("%v", err)
- }
- msg := "Caddyfile is valid"
- fmt.Println(msg)
- log.Printf("[INFO] %s", msg)
- os.Exit(0)
- }
-
- // Start your engines
- instance, err := caddy.Start(caddyfileinput)
- if err != nil {
- mustLogFatalf("%v", err)
- }
-
- // Twiddle your thumbs
- instance.Wait()
-}
-
-// mustLogFatalf wraps log.Fatalf() in a way that ensures the
-// output is always printed to stderr so the user can see it
-// if the user is still there, even if the process log was not
-// enabled. If this process is an upgrade, however, and the user
-// might not be there anymore, this just logs to the process
-// log and exits.
-func mustLogFatalf(format string, args ...interface{}) {
- if !caddy.IsUpgrade() {
- log.SetOutput(os.Stderr)
- }
- log.Fatalf(format, args...)
-}
-
-// confLoader loads the Caddyfile using the -conf flag.
-func confLoader(serverType string) (caddy.Input, error) {
- if conf == "" {
- return nil, nil
- }
-
- if conf == "stdin" {
- return caddy.CaddyfileFromPipe(os.Stdin, serverType)
- }
-
- contents, err := ioutil.ReadFile(conf)
- if err != nil {
- return nil, err
- }
- return caddy.CaddyfileInput{
- Contents: contents,
- Filepath: conf,
- ServerTypeName: serverType,
- }, nil
-}
-
-// defaultLoader loads the Caddyfile from the current working directory.
-func defaultLoader(serverType string) (caddy.Input, error) {
- contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
- return caddy.CaddyfileInput{
- Contents: contents,
- Filepath: caddy.DefaultConfigFile,
- ServerTypeName: serverType,
- }, nil
-}
-
-// setVersion figures out the version information
-// based on variables set by -ldflags.
-func setVersion() {
- // A development build is one that's not at a tag or has uncommitted changes
- devBuild = gitTag == "" || gitShortStat != ""
-
- // Only set the appVersion if -ldflags was used
- if gitNearestTag != "" || gitTag != "" {
- if devBuild && gitNearestTag != "" {
- appVersion = fmt.Sprintf("%s (+%s %s)",
- strings.TrimPrefix(gitNearestTag, "v"), gitCommit, buildDate)
- } else if gitTag != "" {
- appVersion = strings.TrimPrefix(gitTag, "v")
- }
- }
-}
-
-// setCPU parses string cpu and sets GOMAXPROCS
-// according to its value. It accepts either
-// a number (e.g. 3) or a percent (e.g. 50%).
-func setCPU(cpu string) error {
- var numCPU int
-
- availCPU := runtime.NumCPU()
-
- if strings.HasSuffix(cpu, "%") {
- // Percent
- var percent float32
- pctStr := cpu[:len(cpu)-1]
- pctInt, err := strconv.Atoi(pctStr)
- if err != nil || pctInt < 1 || pctInt > 100 {
- return errors.New("invalid CPU value: percentage must be between 1-100")
- }
- percent = float32(pctInt) / 100
- numCPU = int(float32(availCPU) * percent)
- } else {
- // Number
- num, err := strconv.Atoi(cpu)
- if err != nil || num < 1 {
- return errors.New("invalid CPU value: provide a number or percent greater than 0")
- }
- numCPU = num
- }
-
- if numCPU > availCPU {
- numCPU = availCPU
- }
-
- runtime.GOMAXPROCS(numCPU)
- return nil
-}
-
-const appName = "Caddy"
-
-// Flags that control program flow or startup
-var (
- serverType string
- conf string
- cpu string
- logfile string
- revoke string
- version bool
- plugins bool
- validate bool
-)
-
-// Build information obtained with the help of -ldflags
-var (
- appVersion = "(untracked dev build)" // inferred at startup
- devBuild = true // inferred at startup
-
- buildDate string // date -u
- gitTag string // git describe --exact-match HEAD 2> /dev/null
- gitNearestTag string // git describe --abbrev=0 --tags HEAD
- gitCommit string // git rev-parse HEAD
- gitShortStat string // git diff-index --shortstat
- gitFilesModified string // git diff-index --name-only HEAD
-)
diff --git a/caddy/caddymain/run_test.go b/caddy/caddymain/run_test.go
deleted file mode 100644
index d14abffe15d..00000000000
--- a/caddy/caddymain/run_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package caddymain
-
-import (
- "runtime"
- "testing"
-)
-
-func TestSetCPU(t *testing.T) {
- currentCPU := runtime.GOMAXPROCS(-1)
- maxCPU := runtime.NumCPU()
- halfCPU := int(0.5 * float32(maxCPU))
- if halfCPU < 1 {
- halfCPU = 1
- }
- for i, test := range []struct {
- input string
- output int
- shouldErr bool
- }{
- {"1", 1, false},
- {"-1", currentCPU, true},
- {"0", currentCPU, true},
- {"100%", maxCPU, false},
- {"50%", halfCPU, false},
- {"110%", currentCPU, true},
- {"-10%", currentCPU, true},
- {"invalid input", currentCPU, true},
- {"invalid input%", currentCPU, true},
- {"9999", maxCPU, false}, // over available CPU
- } {
- err := setCPU(test.input)
- if test.shouldErr && err == nil {
- t.Errorf("Test %d: Expected error, but there wasn't any", i)
- }
- if !test.shouldErr && err != nil {
- t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
- }
- if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected {
- t.Errorf("Test %d: GOMAXPROCS was %d but expected %d", i, actual, expected)
- }
- // teardown
- runtime.GOMAXPROCS(currentCPU)
- }
-}
diff --git a/caddy/main.go b/caddy/main.go
deleted file mode 100644
index 538a629f7ed..00000000000
--- a/caddy/main.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// By moving the application's package main logic into
-// a package other than main, it becomes much easier to
-// wrap caddy for custom builds that are go-gettable.
-// https://caddy.community/t/my-wish-for-0-9-go-gettable-custom-builds/59?u=matt
-
-package main
-
-import "github.com/mholt/caddy/caddy/caddymain"
-
-var run = caddymain.Run // replaced for tests
-
-func main() {
- run()
-}
diff --git a/caddy/main_test.go b/caddy/main_test.go
deleted file mode 100644
index 52063a75369..00000000000
--- a/caddy/main_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package main
-
-import "testing"
-
-// This works because it does not have the same signature as the
-// conventional "TestMain" function described in the testing package
-// godoc.
-func TestMain(t *testing.T) {
- var ran bool
- run = func() {
- ran = true
- }
- main()
- if !ran {
- t.Error("Expected Run() to be called, but it wasn't")
- }
-}
diff --git a/caddy_test.go b/caddy_test.go
index a7eb19b5140..adf14350e5b 100644
--- a/caddy_test.go
+++ b/caddy_test.go
@@ -1,159 +1,74 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package caddy
import (
- "net"
- "strconv"
"testing"
+ "time"
)
-/*
-// TODO
-func TestCaddyStartStop(t *testing.T) {
- caddyfile := "localhost:1984"
-
- for i := 0; i < 2; i++ {
- _, err := Start(CaddyfileInput{Contents: []byte(caddyfile)})
- if err != nil {
- t.Fatalf("Error starting, iteration %d: %v", i, err)
- }
-
- client := http.Client{
- Timeout: time.Duration(2 * time.Second),
- }
- resp, err := client.Get("http://localhost:1984")
- if err != nil {
- t.Fatalf("Expected GET request to succeed (iteration %d), but it failed: %v", i, err)
- }
- resp.Body.Close()
-
- err = Stop()
- if err != nil {
- t.Fatalf("Error stopping, iteration %d: %v", i, err)
- }
- }
-}
-*/
-
-func TestIsLoopback(t *testing.T) {
- for i, test := range []struct {
+func TestParseDuration(t *testing.T) {
+ const day = 24 * time.Hour
+ for i, tc := range []struct {
input string
- expect bool
+ expect time.Duration
}{
- {"example.com", false},
- {"localhost", true},
- {"localhost:1234", true},
- {"localhost:", true},
- {"127.0.0.1", true},
- {"127.0.0.1:443", true},
- {"127.0.1.5", true},
- {"10.0.0.5", false},
- {"12.7.0.1", false},
- {"[::1]", true},
- {"[::1]:1234", true},
- {"::1", true},
- {"::", false},
- {"[::]", false},
- {"local", false},
+ {
+ input: "3h",
+ expect: 3 * time.Hour,
+ },
+ {
+ input: "1d",
+ expect: day,
+ },
+ {
+ input: "1d30m",
+ expect: day + 30*time.Minute,
+ },
+ {
+ input: "1m2d",
+ expect: time.Minute + day*2,
+ },
+ {
+ input: "1m2d30s",
+ expect: time.Minute + day*2 + 30*time.Second,
+ },
+ {
+ input: "1d2d",
+ expect: 3 * day,
+ },
+ {
+ input: "1.5d",
+ expect: time.Duration(1.5 * float64(day)),
+ },
+ {
+ input: "4m1.25d",
+ expect: 4*time.Minute + time.Duration(1.25*float64(day)),
+ },
+ {
+ input: "-1.25d12h",
+ expect: time.Duration(-1.25*float64(day)) - 12*time.Hour,
+ },
} {
- if got, want := IsLoopback(test.input), test.expect; got != want {
- t.Errorf("Test %d (%s): expected %v but was %v", i, test.input, want, got)
- }
- }
-}
-
-func TestIsInternal(t *testing.T) {
- for i, test := range []struct {
- input string
- expect bool
- }{
- {"9.255.255.255", false},
- {"10.0.0.0", true},
- {"10.0.0.1", true},
- {"10.255.255.254", true},
- {"10.255.255.255", true},
- {"11.0.0.0", false},
- {"10.0.0.5:1234", true},
- {"11.0.0.5:1234", false},
-
- {"172.15.255.255", false},
- {"172.16.0.0", true},
- {"172.16.0.1", true},
- {"172.31.255.254", true},
- {"172.31.255.255", true},
- {"172.32.0.0", false},
- {"172.16.0.1:1234", true},
-
- {"192.167.255.255", false},
- {"192.168.0.0", true},
- {"192.168.0.1", true},
- {"192.168.255.254", true},
- {"192.168.255.255", true},
- {"192.169.0.0", false},
- {"192.168.0.1:1234", true},
-
- {"fbff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false},
- {"fc00::", true},
- {"fc00::1", true},
- {"[fc00::1]", true},
- {"[fc00::1]:8888", true},
- {"fdff:ffff:ffff:ffff:ffff:ffff:ffff:fffe", true},
- {"fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", true},
- {"fe00::", false},
- {"fd12:3456:789a:1::1:1234", true},
-
- {"example.com", false},
- {"localhost", false},
- {"localhost:1234", false},
- {"localhost:", false},
- {"127.0.0.1", false},
- {"127.0.0.1:443", false},
- {"127.0.1.5", false},
- {"12.7.0.1", false},
- {"[::1]", false},
- {"[::1]:1234", false},
- {"::1", false},
- {"::", false},
- {"[::]", false},
- {"local", false},
- } {
- if got, want := IsInternal(test.input), test.expect; got != want {
- t.Errorf("Test %d (%s): expected %v but was %v", i, test.input, want, got)
+ actual, err := ParseDuration(tc.input)
+ if err != nil {
+ t.Errorf("Test %d ('%s'): Got error: %v", i, tc.input, err)
+ continue
}
- }
-}
-
-func TestListenerAddrEqual(t *testing.T) {
- ln1, err := net.Listen("tcp", "[::]:0")
- if err != nil {
- t.Fatal(err)
- }
- defer ln1.Close()
- ln1port := strconv.Itoa(ln1.Addr().(*net.TCPAddr).Port)
-
- ln2, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
- defer ln2.Close()
- ln2port := strconv.Itoa(ln2.Addr().(*net.TCPAddr).Port)
-
- for i, test := range []struct {
- ln net.Listener
- addr string
- expect bool
- }{
- {ln1, ":" + ln2port, false},
- {ln1, "0.0.0.0:" + ln2port, false},
- {ln1, "0.0.0.0", false},
- {ln1, ":" + ln1port, true},
- {ln1, "0.0.0.0:" + ln1port, true},
- {ln2, ":" + ln2port, false},
- {ln2, "127.0.0.1:" + ln1port, false},
- {ln2, "127.0.0.1", false},
- {ln2, "127.0.0.1:" + ln2port, true},
- } {
- if got, want := listenerAddrEqual(test.ln, test.addr), test.expect; got != want {
- t.Errorf("Test %d (%s == %s): expected %v but was %v", i, test.addr, test.ln.Addr().String(), want, got)
+ if actual != tc.expect {
+ t.Errorf("Test %d ('%s'): Expected=%s Actual=%s", i, tc.input, tc.expect, actual)
}
}
}
diff --git a/caddyconfig/caddyfile/adapter.go b/caddyconfig/caddyfile/adapter.go
new file mode 100644
index 00000000000..da4f98337fb
--- /dev/null
+++ b/caddyconfig/caddyfile/adapter.go
@@ -0,0 +1,145 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+)
+
+// Adapter adapts Caddyfile to Caddy JSON.
+type Adapter struct {
+ ServerType ServerType
+}
+
+// Adapt converts the Caddyfile config in body to Caddy JSON.
+func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
+ if a.ServerType == nil {
+ return nil, nil, fmt.Errorf("no server type")
+ }
+ if options == nil {
+ options = make(map[string]any)
+ }
+
+ filename, _ := options["filename"].(string)
+ if filename == "" {
+ filename = "Caddyfile"
+ }
+
+ serverBlocks, err := Parse(filename, body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ // lint check: see if input was properly formatted; sometimes messy files parse
+ // successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
+ if warning, different := FormattingDifference(filename, body); different {
+ warnings = append(warnings, warning)
+ }
+
+ result, err := json.Marshal(cfg)
+
+ return result, warnings, err
+}
+
+// FormattingDifference returns a warning and true if the formatted version
+// is any different from the input; empty warning and false otherwise.
+// TODO: also perform this check on imported files
+func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
+ // replace windows-style newlines to normalize comparison
+ normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1)
+
+ formatted := Format(normalizedBody)
+ if bytes.Equal(formatted, normalizedBody) {
+ return caddyconfig.Warning{}, false
+ }
+
+ // find where the difference is
+ line := 1
+ for i, ch := range normalizedBody {
+ if i >= len(formatted) || ch != formatted[i] {
+ break
+ }
+ if ch == '\n' {
+ line++
+ }
+ }
+ return caddyconfig.Warning{
+ File: filename,
+ Line: line,
+ Message: "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
+ }, true
+}
+
+// Unmarshaler is a type that can unmarshal Caddyfile tokens to
+// set itself up for a JSON encoding. The goal of an unmarshaler
+// is not to set itself up for actual use, but to set itself up for
+// being marshaled into JSON. Caddyfile-unmarshaled values will not
+// be used directly; they will be encoded as JSON and then used from
+// that. Implementations _may_ be able to support multiple segments
+// (instances of their directive or batch of tokens); typically this
+// means wrapping parsing logic in a loop: `for d.Next() { ... }`.
+// More commonly, only a single segment is supported, so a simple
+// `d.Next()` at the start should be used to consume the module
+// identifier token (directive name, etc).
+type Unmarshaler interface {
+ UnmarshalCaddyfile(d *Dispenser) error
+}
+
+// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
+type ServerType interface {
+ // Setup takes the server blocks which contain tokens,
+ // as well as options (e.g. CLI flags) and creates a
+ // Caddy config, along with any warnings or an error.
+ Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
+}
+
+// UnmarshalModule instantiates a module with the given ID and invokes
+// UnmarshalCaddyfile on the new value using the immediate next segment
+// of d as input. In other words, d's next token should be the first
+// token of the module's Caddyfile input.
+//
+// This function is used when the next segment of Caddyfile tokens
+// belongs to another Caddy module. The returned value is often
+// type-asserted to the module's associated type for practical use
+// when setting up a config.
+func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) {
+ mod, err := caddy.GetModule(moduleID)
+ if err != nil {
+ return nil, d.Errf("getting module named '%s': %v", moduleID, err)
+ }
+ inst := mod.New()
+ unm, ok := inst.(Unmarshaler)
+ if !ok {
+ return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst)
+ }
+ err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
+ if err != nil {
+ return nil, err
+ }
+ return unm, nil
+}
+
+// Interface guard
+var _ caddyconfig.Adapter = (*Adapter)(nil)
diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go
new file mode 100644
index 00000000000..325bb54d3f3
--- /dev/null
+++ b/caddyconfig/caddyfile/dispenser.go
@@ -0,0 +1,521 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strconv"
+ "strings"
+)
+
+// Dispenser is a type that dispenses tokens, similarly to a lexer,
+// except that it can do so with some notion of structure. An empty
+// Dispenser is invalid; call NewDispenser to make a proper instance.
+type Dispenser struct {
+	tokens  []Token // the token stream being dispensed
+	cursor  int     // index into tokens; -1 means "before the first token"
+	nesting int     // current brace-nesting depth, maintained by NextBlock
+
+	// A map of arbitrary context data that can be used
+	// to pass through some information to unmarshalers.
+	context map[string]any
+}
+
+// NewDispenser returns a Dispenser filled with the given tokens.
+func NewDispenser(tokens []Token) *Dispenser {
+	return &Dispenser{
+		tokens: tokens,
+		cursor: -1, // start before the first token so Next() loads token 0
+	}
+}
+
+// NewTestDispenser parses input into tokens and creates a new
+// Dispenser for test purposes only; any errors are fatal.
+func NewTestDispenser(input string) *Dispenser {
+	tokens, err := allTokens("Testfile", []byte(input))
+	// use errors.Is so a wrapped io.EOF is also tolerated;
+	// EOF simply means the input was fully tokenized
+	if err != nil && !errors.Is(err, io.EOF) {
+		log.Fatalf("getting all tokens from input: %v", err)
+	}
+	return NewDispenser(tokens)
+}
+
+// Next loads the next token. Returns true if a token
+// was loaded; false otherwise. If false, all tokens
+// have been consumed.
+func (d *Dispenser) Next() bool {
+	if d.cursor < len(d.tokens)-1 {
+		d.cursor++
+		return true
+	}
+	return false
+}
+
+// Prev moves to the previous token. It does the inverse
+// of Next(), except this function may decrement the cursor
+// to -1 so that the next call to Next() points to the
+// first token; this allows dispensing to "start over". This
+// method returns true if the cursor ends up pointing to a
+// valid token.
+func (d *Dispenser) Prev() bool {
+	if d.cursor > -1 {
+		d.cursor--
+		// cursor may now be -1 ("before the start"): a valid
+		// rewind position, but not a valid token
+		return d.cursor > -1
+	}
+	return false
+}
+
+// NextArg loads the next token if it is on the same
+// line and if it is not a block opening (open curly
+// brace). Returns true if an argument token was
+// loaded; false otherwise. If false, all tokens on
+// the line have been consumed except for potentially
+// a block opening. It handles imported tokens
+// correctly.
+func (d *Dispenser) NextArg() bool {
+	if !d.nextOnSameLine() {
+		return false
+	}
+	if d.Val() == "{" {
+		// roll back; a block opening is not an argument
+		d.cursor--
+		return false
+	}
+	return true
+}
+
+// nextOnSameLine advances the cursor if the next
+// token is on the same line of the same file.
+func (d *Dispenser) nextOnSameLine() bool {
+	if d.cursor < 0 {
+		// not started yet; the first token trivially qualifies
+		d.cursor++
+		return true
+	}
+	if d.cursor >= len(d.tokens)-1 {
+		return false
+	}
+	curr := d.tokens[d.cursor]
+	next := d.tokens[d.cursor+1]
+	if !isNextOnNewLine(curr, next) {
+		d.cursor++
+		return true
+	}
+	return false
+}
+
+// NextLine loads the next token only if it is not on the same
+// line as the current token, and returns true if a token was
+// loaded; false otherwise. If false, there is not another token
+// or it is on the same line. It handles imported tokens correctly.
+func (d *Dispenser) NextLine() bool {
+	if d.cursor < 0 {
+		// not started yet; the first token begins the first line
+		d.cursor++
+		return true
+	}
+	if d.cursor >= len(d.tokens)-1 {
+		return false
+	}
+	curr := d.tokens[d.cursor]
+	next := d.tokens[d.cursor+1]
+	if isNextOnNewLine(curr, next) {
+		d.cursor++
+		return true
+	}
+	return false
+}
+
+// NextBlock can be used as the condition of a for loop
+// to load the next token as long as it opens a block or
+// is already in a block nested more than initialNestingLevel.
+// In other words, a loop over NextBlock() will iterate
+// all tokens in the block assuming the next token is an
+// open curly brace, until the matching closing brace.
+// The open and closing brace tokens for the outer-most
+// block will be consumed internally and omitted from
+// the iteration.
+//
+// Proper use of this method looks like this:
+//
+//	for nesting := d.Nesting(); d.NextBlock(nesting); {
+//	}
+//
+// However, in simple cases where it is known that the
+// Dispenser is new and has not already traversed state
+// by a loop over NextBlock(), this will do:
+//
+//	for d.NextBlock(0) {
+//	}
+//
+// As with other token parsing logic, a loop over
+// NextBlock() should be contained within a loop over
+// Next(), as it is usually prudent to skip the initial
+// token.
+func (d *Dispenser) NextBlock(initialNestingLevel int) bool {
+	if d.nesting > initialNestingLevel {
+		// already inside the block: keep dispensing tokens,
+		// tracking nesting as inner braces open and close
+		if !d.Next() {
+			return false // should be EOF error
+		}
+		// a brace followed by a same-line token is a token-internal
+		// brace (e.g. a placeholder), not block structure
+		if d.Val() == "}" && !d.nextOnSameLine() {
+			d.nesting--
+		} else if d.Val() == "{" && !d.nextOnSameLine() {
+			d.nesting++
+		}
+		return d.nesting > initialNestingLevel
+	}
+	if !d.nextOnSameLine() { // block must open on same line
+		return false
+	}
+	if d.Val() != "{" {
+		d.cursor-- // roll back if not opening brace
+		return false
+	}
+	d.Next() // consume open curly brace
+	if d.Val() == "}" {
+		return false // open and then closed right away
+	}
+	d.nesting++
+	return true
+}
+
+// Nesting returns the current nesting level. Necessary
+// if using NextBlock()
+func (d *Dispenser) Nesting() int {
+	return d.nesting
+}
+
+// Val gets the text of the current token. If there is no token
+// loaded, it returns empty string.
+func (d *Dispenser) Val() string {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return ""
+	}
+	return d.tokens[d.cursor].Text
+}
+
+// ValRaw gets the raw text of the current token (including quotes).
+// If the token was a heredoc, then the delimiter is not included,
+// because that is not relevant to any unmarshaling logic at this time.
+// If there is no token loaded, it returns empty string.
+func (d *Dispenser) ValRaw() string {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return ""
+	}
+	quote := d.tokens[d.cursor].wasQuoted
+	// '<' marks a heredoc token, which is returned without its delimiter
+	if quote > 0 && quote != '<' {
+		// string literal
+		return string(quote) + d.tokens[d.cursor].Text + string(quote)
+	}
+	return d.tokens[d.cursor].Text
+}
+
+// ScalarVal gets value of the current token, converted to the closest
+// scalar type (int, float64, bool, or string, tried in that order).
+// Quoted tokens are always returned as strings. If there is no token
+// loaded, it returns nil.
+func (d *Dispenser) ScalarVal() any {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return nil
+	}
+	quote := d.tokens[d.cursor].wasQuoted
+	text := d.tokens[d.cursor].Text
+
+	if quote > 0 {
+		return text // string literal
+	}
+	if num, err := strconv.Atoi(text); err == nil {
+		return num
+	}
+	if num, err := strconv.ParseFloat(text, 64); err == nil {
+		return num
+	}
+	// renamed from `bool`, which shadowed the predeclared identifier
+	if b, err := strconv.ParseBool(text); err == nil {
+		return b
+	}
+	return text
+}
+
+// Line gets the line number of the current token.
+// If there is no token loaded, it returns 0.
+func (d *Dispenser) Line() int {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return 0
+	}
+	return d.tokens[d.cursor].Line
+}
+
+// File gets the filename where the current token originated.
+// If there is no token loaded, it returns empty string.
+func (d *Dispenser) File() string {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return ""
+	}
+	return d.tokens[d.cursor].File
+}
+
+// Args is a convenience function that loads the next arguments
+// (tokens on the same line) into an arbitrary number of strings
+// pointed to in targets. If there are not enough argument tokens
+// available to fill targets, false is returned and the remaining
+// targets are left unchanged. If all the targets are filled,
+// then true is returned.
+func (d *Dispenser) Args(targets ...*string) bool {
+	for i := 0; i < len(targets); i++ {
+		if !d.NextArg() {
+			return false
+		}
+		*targets[i] = d.Val()
+	}
+	return true
+}
+
+// AllArgs is like Args, but if there are more argument tokens
+// available than there are targets, false is returned. The
+// number of available argument tokens must match the number of
+// targets exactly to return true.
+func (d *Dispenser) AllArgs(targets ...*string) bool {
+	if !d.Args(targets...) {
+		return false
+	}
+	if d.NextArg() {
+		// too many args; rewind the extra one before failing
+		d.Prev()
+		return false
+	}
+	return true
+}
+
+// CountRemainingArgs counts the amount of remaining arguments
+// (tokens on the same line) without consuming the tokens.
+func (d *Dispenser) CountRemainingArgs() int {
+	count := 0
+	for d.NextArg() {
+		count++
+	}
+	// rewind to where we started so the tokens are not consumed
+	for i := 0; i < count; i++ {
+		d.Prev()
+	}
+	return count
+}
+
+// RemainingArgs loads any more arguments (tokens on the same line)
+// into a slice and returns them. Open curly brace tokens also indicate
+// the end of arguments, and the curly brace is not included in
+// the return value nor is it loaded.
+func (d *Dispenser) RemainingArgs() []string {
+	var args []string
+	for d.NextArg() {
+		args = append(args, d.Val())
+	}
+	return args
+}
+
+// RemainingArgsRaw loads any more arguments (tokens on the same line,
+// retaining quotes) into a slice and returns them. Open curly brace
+// tokens also indicate the end of arguments, and the curly brace is
+// not included in the return value nor is it loaded.
+func (d *Dispenser) RemainingArgsRaw() []string {
+	var args []string
+	for d.NextArg() {
+		args = append(args, d.ValRaw())
+	}
+	return args
+}
+
+// NewFromNextSegment returns a new dispenser with a copy of
+// the tokens from the current token until the end of the
+// "directive" whether that be to the end of the line or
+// the end of a block that starts at the end of the line;
+// in other words, until the end of the segment.
+func (d *Dispenser) NewFromNextSegment() *Dispenser {
+	return NewDispenser(d.NextSegment())
+}
+
+// NextSegment returns a copy of the tokens from the current
+// token until the end of the line or block that starts at
+// the end of the line. The returned segment includes the
+// surrounding curly braces, if a block is present.
+func (d *Dispenser) NextSegment() Segment {
+	tkns := Segment{d.Token()}
+	// collect same-line argument tokens first
+	for d.NextArg() {
+		tkns = append(tkns, d.Token())
+	}
+	var openedBlock bool
+	for nesting := d.Nesting(); d.NextBlock(nesting); {
+		if !openedBlock {
+			// because NextBlock() consumes the initial open
+			// curly brace, we rewind here to append it, since
+			// our case is special in that we want the new
+			// dispenser to have all the tokens including
+			// surrounding curly braces
+			d.Prev()
+			tkns = append(tkns, d.Token())
+			d.Next()
+			openedBlock = true
+		}
+		tkns = append(tkns, d.Token())
+	}
+	if openedBlock {
+		// include closing brace
+		tkns = append(tkns, d.Token())
+
+		// do not consume the closing curly brace; the
+		// next iteration of the enclosing loop will
+		// call Next() and consume it
+	}
+	return tkns
+}
+
+// Token returns the current token. If there is no token
+// loaded, it returns the zero-value Token.
+func (d *Dispenser) Token() Token {
+	if d.cursor < 0 || d.cursor >= len(d.tokens) {
+		return Token{}
+	}
+	return d.tokens[d.cursor]
+}
+
+// Reset sets d's cursor to the beginning, as
+// if this was a new and unused dispenser.
+func (d *Dispenser) Reset() {
+	d.cursor = -1
+	d.nesting = 0
+}
+
+// ArgErr returns an argument error, meaning that another
+// argument was expected but not found. In other words,
+// a line break or open curly brace was encountered instead of
+// an argument.
+func (d *Dispenser) ArgErr() error {
+	if d.Val() == "{" {
+		return d.Err("unexpected token '{', expecting argument")
+	}
+	return d.Errf("wrong argument count or unexpected line ending after '%s'", d.Val())
+}
+
+// SyntaxErr creates a generic syntax error which explains what was
+// found and what was expected. The error includes the file, line,
+// and import chain of the current token.
+func (d *Dispenser) SyntaxErr(expected string) error {
+	msg := fmt.Sprintf("syntax error: unexpected token '%s', expecting '%s', at %s:%d import chain: ['%s']", d.Val(), expected, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
+	return errors.New(msg)
+}
+
+// EOFErr returns an error indicating that the dispenser reached
+// the end of the input when searching for the next token.
+func (d *Dispenser) EOFErr() error {
+	return d.Errf("unexpected EOF")
+}
+
+// Err generates a custom parse-time error with a message of msg.
+func (d *Dispenser) Err(msg string) error {
+	return d.WrapErr(errors.New(msg))
+}
+
+// Errf is like Err, but for formatted error messages.
+// Because it is implemented with fmt.Errorf, the %w verb
+// may be used to wrap an underlying error.
+func (d *Dispenser) Errf(format string, args ...any) error {
+	return d.WrapErr(fmt.Errorf(format, args...))
+}
+
+// WrapErr takes an existing error and adds the Caddyfile file and line number,
+// plus the import chain if the token came from an imported file.
+func (d *Dispenser) WrapErr(err error) error {
+	if len(d.Token().imports) > 0 {
+		return fmt.Errorf("%w, at %s:%d import chain ['%s']", err, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
+	}
+	return fmt.Errorf("%w, at %s:%d", err, d.File(), d.Line())
+}
+
+// Delete deletes the current token and returns the updated slice
+// of tokens. The cursor is not advanced to the next token.
+// Because deletion modifies the underlying slice, this method
+// should only be called if you have access to the original slice
+// of tokens and/or are using the slice of tokens outside this
+// Dispenser instance. If you do not re-assign the slice with the
+// return value of this method, inconsistencies in the token
+// array will become apparent (or worse, hide from you like they
+// did me for 3 and a half freaking hours late one night).
+func (d *Dispenser) Delete() []Token {
+	if d.cursor >= 0 && d.cursor <= len(d.tokens)-1 {
+		// splice out the token at the cursor, then step back so the
+		// next call to Next() lands on the token that followed it
+		d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...)
+		d.cursor--
+	}
+	return d.tokens
+}
+
+// DeleteN is the same as Delete, but can delete many tokens at once.
+// If there aren't N tokens available to delete, none are deleted.
+func (d *Dispenser) DeleteN(amount int) []Token {
+	// removes the current token and the (amount-1) tokens before it,
+	// but only if that many tokens have already been traversed
+	if amount > 0 && d.cursor >= (amount-1) && d.cursor <= len(d.tokens)-1 {
+		d.tokens = append(d.tokens[:d.cursor-(amount-1)], d.tokens[d.cursor+1:]...)
+		d.cursor -= amount
+	}
+	return d.tokens
+}
+
+// SetContext sets a key-value pair in the context map,
+// lazily allocating the map on first use.
+func (d *Dispenser) SetContext(key string, value any) {
+	if d.context == nil {
+		d.context = make(map[string]any)
+	}
+	d.context[key] = value
+}
+
+// GetContext gets the value of a key in the context map,
+// or nil if the key does not exist.
+func (d *Dispenser) GetContext(key string) any {
+	if d.context == nil {
+		return nil
+	}
+	return d.context[key]
+}
+
+// GetContextString gets the value of a key in the context map
+// as a string, or an empty string if the key does not exist
+// (or if the stored value is not a string).
+func (d *Dispenser) GetContextString(key string) string {
+	if d.context == nil {
+		return ""
+	}
+	if val, ok := d.context[key].(string); ok {
+		return val
+	}
+	return ""
+}
+
+// isNewLine determines whether the current token is on a different
+// line (higher line number) than the previous token. It handles imported
+// tokens correctly. If there isn't a previous token, it returns true.
+func (d *Dispenser) isNewLine() bool {
+	if d.cursor < 1 {
+		return true
+	}
+	if d.cursor > len(d.tokens)-1 {
+		return false
+	}
+
+	prev := d.tokens[d.cursor-1]
+	curr := d.tokens[d.cursor]
+	return isNextOnNewLine(prev, curr)
+}
+
+// isNextOnNewLine determines whether the current token is on a different
+// line (higher line number) than the next token. It handles imported
+// tokens correctly. If there isn't a next token, it returns true.
+func (d *Dispenser) isNextOnNewLine() bool {
+	if d.cursor < 0 {
+		return false
+	}
+	if d.cursor >= len(d.tokens)-1 {
+		return true
+	}
+
+	curr := d.tokens[d.cursor]
+	next := d.tokens[d.cursor+1]
+	return isNextOnNewLine(curr, next)
+}
+
+// MatcherNameCtxKey is a key for the Dispenser's context map;
+// presumably it carries the name of the named matcher currently
+// being parsed — TODO(review): confirm against call sites.
+const MatcherNameCtxKey = "matcher_name"
diff --git a/caddyfile/dispenser_test.go b/caddyconfig/caddyfile/dispenser_test.go
similarity index 87%
rename from caddyfile/dispenser_test.go
rename to caddyconfig/caddyfile/dispenser_test.go
index 300cd1a51b9..0f6ee5043f4 100644
--- a/caddyfile/dispenser_test.go
+++ b/caddyconfig/caddyfile/dispenser_test.go
@@ -1,6 +1,21 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package caddyfile
import (
+ "errors"
"reflect"
"strings"
"testing"
@@ -11,7 +26,7 @@ func TestDispenser_Val_Next(t *testing.T) {
dir1 arg1
dir2 arg2 arg3
dir3`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
if val := d.Val(); val != "" {
t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val)
@@ -49,7 +64,7 @@ func TestDispenser_NextArg(t *testing.T) {
input := `dir1 arg1
dir2 arg2 arg3
dir3`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) {
if d.Next() != shouldLoad {
@@ -96,7 +111,7 @@ func TestDispenser_NextLine(t *testing.T) {
input := `host:port
dir1 arg1
dir2 arg2 arg3`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) {
if d.NextLine() != shouldLoad {
@@ -129,10 +144,10 @@ func TestDispenser_NextBlock(t *testing.T) {
}
foobar2 {
}`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) {
- if loaded := d.NextBlock(); loaded != shouldLoad {
+ if loaded := d.NextBlock(0); loaded != shouldLoad {
t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded)
}
if d.cursor != expectedCursor {
@@ -159,7 +174,7 @@ func TestDispenser_Args(t *testing.T) {
dir2 arg4 arg5
dir3 arg6 arg7
dir4`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
d.Next() // dir1
@@ -226,7 +241,7 @@ func TestDispenser_RemainingArgs(t *testing.T) {
dir2 arg4 arg5
dir3 arg6 { arg7
dir4`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
d.Next() // dir1
@@ -263,7 +278,7 @@ func TestDispenser_ArgErr_Err(t *testing.T) {
input := `dir1 {
}
dir2 arg1 arg2`
- d := NewDispenser("Testfile", strings.NewReader(input))
+ d := NewTestDispenser(input)
d.cursor = 1 // {
@@ -289,4 +304,10 @@ func TestDispenser_ArgErr_Err(t *testing.T) {
if !strings.Contains(err.Error(), "foobar") {
t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
}
+
+ ErrBarIsFull := errors.New("bar is full")
+ bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull)
+ if !errors.Is(bookingError, ErrBarIsFull) {
+ t.Errorf("Errf(): should be able to unwrap the error chain")
+ }
}
diff --git a/caddyconfig/caddyfile/formatter.go b/caddyconfig/caddyfile/formatter.go
new file mode 100644
index 00000000000..d35f0ac6b68
--- /dev/null
+++ b/caddyconfig/caddyfile/formatter.go
@@ -0,0 +1,298 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bytes"
+ "io"
+ "slices"
+ "unicode"
+)
+
+// Format formats the input Caddyfile to a standard, nice-looking
+// appearance. It works by reading each rune of the input and taking
+// control over all the bracing and whitespace that is written; otherwise,
+// words, comments, placeholders, and escaped characters are all treated
+// literally and written as they appear in the input.
+func Format(input []byte) []byte {
+	input = bytes.TrimSpace(input)
+
+	out := new(bytes.Buffer)
+	rdr := bytes.NewReader(input)
+
+	// state machine for recognizing heredoc tokens ("<<MARKER ... MARKER")
+	type heredocState int
+
+	const (
+		heredocClosed  heredocState = 0
+		heredocOpening heredocState = 1
+		heredocOpened  heredocState = 2
+	)
+
+	var (
+		last rune // the last character that was written to the result
+
+		space           = true // whether current/previous character was whitespace (beginning of input counts as space)
+		beginningOfLine = true // whether we are at beginning of line
+
+		openBrace        bool // whether current word/token is or started with open curly brace
+		openBraceWritten bool // if openBrace, whether that brace was written or not
+		openBraceSpace   bool // whether there was a non-newline space before open brace
+
+		newLines int // count of newlines consumed
+
+		comment bool // whether we're in a comment
+		quoted  bool // whether we're in a quoted segment
+		escaped bool // whether current char is escaped
+
+		heredoc              heredocState // whether we're in a heredoc
+		heredocEscaped       bool         // whether heredoc is escaped
+		heredocMarker        []rune
+		heredocClosingMarker []rune
+
+		nesting int // indentation level
+	)
+
+	write := func(ch rune) {
+		out.WriteRune(ch)
+		last = ch
+	}
+
+	indent := func() {
+		for tabs := nesting; tabs > 0; tabs-- {
+			write('\t')
+		}
+	}
+
+	nextLine := func() {
+		write('\n')
+		beginningOfLine = true
+	}
+
+	for {
+		ch, _, err := rdr.ReadRune()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			panic(err)
+		}
+
+		// detect whether we have the start of a heredoc
+		if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
+			space && last == '<' && ch == '<' {
+			write(ch)
+			heredoc = heredocOpening
+			space = false
+			continue
+		}
+
+		if heredoc == heredocOpening {
+			if ch == '\n' {
+				if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
+					heredoc = heredocOpened
+				} else {
+					heredocMarker = nil
+					heredoc = heredocClosed
+					nextLine()
+					continue
+				}
+				write(ch)
+				continue
+			}
+			if unicode.IsSpace(ch) {
+				// a space means it's just a regular token and not a heredoc
+				heredocMarker = nil
+				heredoc = heredocClosed
+			} else {
+				heredocMarker = append(heredocMarker, ch)
+				write(ch)
+				continue
+			}
+		}
+		// if we're in a heredoc, all characters are read&write as-is
+		if heredoc == heredocOpened {
+			heredocClosingMarker = append(heredocClosingMarker, ch)
+			if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
+				heredocClosingMarker = heredocClosingMarker[1:]
+			}
+			// check if we're done
+			if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
+				heredocMarker = nil
+				heredocClosingMarker = nil
+				heredoc = heredocClosed
+			} else {
+				write(ch)
+				if ch == '\n' {
+					heredocClosingMarker = heredocClosingMarker[:0]
+				}
+				continue
+			}
+		}
+
+		// NOTE(review): a lone '<' that did not become a heredoc opener
+		// clears the space flag so it is treated as a token character
+		if last == '<' && space {
+			space = false
+		}
+
+		if comment {
+			if ch == '\n' {
+				comment = false
+				space = true
+				nextLine()
+				continue
+			} else {
+				write(ch)
+				continue
+			}
+		}
+
+		if !escaped && ch == '\\' {
+			if space {
+				write(' ')
+				space = false
+			}
+			write(ch)
+			escaped = true
+			continue
+		}
+
+		if escaped {
+			if ch == '<' {
+				heredocEscaped = true
+			}
+			write(ch)
+			escaped = false
+			continue
+		}
+
+		if quoted {
+			if ch == '"' {
+				quoted = false
+			}
+			write(ch)
+			continue
+		}
+
+		if space && ch == '"' {
+			quoted = true
+		}
+
+		if unicode.IsSpace(ch) {
+			space = true
+			heredocEscaped = false
+			if ch == '\n' {
+				newLines++
+			}
+			continue
+		}
+		spacePrior := space
+		space = false
+
+		//////////////////////////////////////////////////////////
+		// I find it helpful to think of the formatting loop in two
+		// main sections; by the time we reach this point, we
+		// know we are in a "regular" part of the file: we know
+		// the character is not a space, not in a literal segment
+		// like a comment or quoted, it's not escaped, etc.
+		//////////////////////////////////////////////////////////
+
+		if ch == '#' {
+			comment = true
+		}
+
+		// if the last token was the beginning of a structural open brace,
+		// and a space followed it, we can now commit to writing the brace
+		if openBrace && spacePrior && !openBraceWritten {
+			if nesting == 0 && last == '}' {
+				nextLine()
+				nextLine()
+			}
+
+			openBrace = false
+			if beginningOfLine {
+				indent()
+			} else if !openBraceSpace {
+				write(' ')
+			}
+			write('{')
+			openBraceWritten = true
+			nextLine()
+			newLines = 0
+			// prevent infinite nesting from ridiculous inputs (issue #4169)
+			if nesting < 10 {
+				nesting++
+			}
+		}
+
+		switch {
+		case ch == '{':
+			openBrace = true
+			openBraceWritten = false
+			openBraceSpace = spacePrior && !beginningOfLine
+			if openBraceSpace {
+				write(' ')
+			}
+			continue
+
+		case ch == '}' && (spacePrior || !openBrace):
+			if last != '\n' {
+				nextLine()
+			}
+			if nesting > 0 {
+				nesting--
+			}
+			indent()
+			write('}')
+			newLines = 0
+			continue
+		}
+
+		// collapse runs of blank lines to at most one blank line
+		if newLines > 2 {
+			newLines = 2
+		}
+		for i := 0; i < newLines; i++ {
+			nextLine()
+		}
+		newLines = 0
+		if beginningOfLine {
+			indent()
+		}
+		if nesting == 0 && last == '}' && beginningOfLine {
+			nextLine()
+			nextLine()
+		}
+
+		if !beginningOfLine && spacePrior {
+			write(' ')
+		}
+
+		if openBrace && !openBraceWritten {
+			write('{')
+			openBraceWritten = true
+		}
+
+		if spacePrior && ch == '<' {
+			space = true
+		}
+
+		write(ch)
+
+		beginningOfLine = false
+	}
+
+	// the Caddyfile does not need any leading or trailing spaces, but...
+	trimmedResult := bytes.TrimSpace(out.Bytes())
+
+	// ...Caddyfiles should, however, end with a newline because
+	// newlines are significant to the syntax of the file
+	return append(trimmedResult, '\n')
+}
diff --git a/caddyconfig/caddyfile/formatter_fuzz.go b/caddyconfig/caddyfile/formatter_fuzz.go
new file mode 100644
index 00000000000..7c1fc643928
--- /dev/null
+++ b/caddyconfig/caddyfile/formatter_fuzz.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package caddyfile
+
+import "bytes"
+
+// FuzzFormat checks that formatting is idempotent: formatting an
+// already-formatted Caddyfile must produce identical output.
+func FuzzFormat(input []byte) int {
+	once := Format(input)
+	twice := Format(once)
+	if !bytes.Equal(once, twice) {
+		return 0
+	}
+	return 1
+}
diff --git a/caddyconfig/caddyfile/formatter_test.go b/caddyconfig/caddyfile/formatter_test.go
new file mode 100644
index 00000000000..6eec822fe59
--- /dev/null
+++ b/caddyconfig/caddyfile/formatter_test.go
@@ -0,0 +1,451 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestFormatter(t *testing.T) {
+ for i, tc := range []struct {
+ description string
+ input string
+ expect string
+ }{
+ {
+ description: "very simple",
+ input: `abc def
+ g hi jkl
+mn`,
+ expect: `abc def
+g hi jkl
+mn`,
+ },
+ {
+ description: "basic indentation, line breaks, and nesting",
+ input: ` a
+b
+
+ c {
+ d
+}
+
+e { f
+}
+
+
+
+g {
+h {
+i
+}
+}
+
+j { k {
+l
+}
+}
+
+m {
+ n { o
+ }
+ p { q r
+s }
+}
+
+ {
+{ t
+ u
+
+ v
+
+w
+}
+}`,
+ expect: `a
+b
+
+c {
+ d
+}
+
+e {
+ f
+}
+
+g {
+ h {
+ i
+ }
+}
+
+j {
+ k {
+ l
+ }
+}
+
+m {
+ n {
+ o
+ }
+ p {
+ q r
+ s
+ }
+}
+
+{
+ {
+ t
+ u
+
+ v
+
+ w
+ }
+}`,
+ },
+ {
+ description: "block spacing",
+ input: `a{
+ b
+}
+
+c{ d
+}`,
+ expect: `a {
+ b
+}
+
+c {
+ d
+}`,
+ },
+ {
+ description: "advanced spacing",
+ input: `abc {
+ def
+}ghi{
+ jkl mno
+pqr}`,
+ expect: `abc {
+ def
+}
+
+ghi {
+ jkl mno
+ pqr
+}`,
+ },
+ {
+ description: "env var placeholders",
+ input: `{$A}
+
+b {
+{$C}
+}
+
+d { {$E}
+}
+
+{ {$F}
+}
+`,
+ expect: `{$A}
+
+b {
+ {$C}
+}
+
+d {
+ {$E}
+}
+
+{
+ {$F}
+}`,
+ },
+ {
+ description: "env var placeholders with port",
+ input: `:{$PORT}`,
+ expect: `:{$PORT}`,
+ },
+ {
+ description: "comments",
+ input: `#a "\n"
+
+ #b {
+ c
+}
+
+d {
+e#f
+# g
+}
+
+h { # i
+}`,
+ expect: `#a "\n"
+
+#b {
+c
+}
+
+d {
+ e#f
+ # g
+}
+
+h {
+ # i
+}`,
+ },
+ {
+ description: "quotes and escaping",
+ input: `"a \"b\" "#c
+ d
+
+e {
+"f"
+}
+
+g { "h"
+}
+
+i {
+ "foo
+bar"
+}
+
+j {
+"\"k\" l m"
+}`,
+ expect: `"a \"b\" "#c
+d
+
+e {
+ "f"
+}
+
+g {
+ "h"
+}
+
+i {
+ "foo
+bar"
+}
+
+j {
+ "\"k\" l m"
+}`,
+ },
+ {
+ description: "bad nesting (too many open)",
+ input: `a
+{
+ {
+}`,
+ expect: `a {
+ {
+ }
+`,
+ },
+ {
+ description: "bad nesting (too many close)",
+ input: `a
+{
+ {
+}}}`,
+ expect: `a {
+ {
+ }
+}
+}
+`,
+ },
+ {
+ description: "json",
+ input: `foo
+bar "{\"key\":34}"
+`,
+ expect: `foo
+bar "{\"key\":34}"`,
+ },
+ {
+ description: "escaping after spaces",
+ input: `foo \"literal\"`,
+ expect: `foo \"literal\"`,
+ },
+ {
+ description: "simple placeholders as standalone tokens",
+ input: `foo {bar}`,
+ expect: `foo {bar}`,
+ },
+ {
+ description: "simple placeholders within tokens",
+ input: `foo{bar} foo{bar}baz`,
+ expect: `foo{bar} foo{bar}baz`,
+ },
+ {
+ description: "placeholders and malformed braces",
+ input: `foo{bar} foo{ bar}baz`,
+ expect: `foo{bar} foo {
+ bar
+}
+
+baz`,
+ },
+ {
+ description: "hash within string is not a comment",
+ input: `redir / /some/#/path`,
+ expect: `redir / /some/#/path`,
+ },
+ {
+ description: "brace does not fold into comment above",
+ input: `# comment
+{
+ foo
+}`,
+ expect: `# comment
+{
+ foo
+}`,
+ },
+ {
+ description: "matthewpi/vscode-caddyfile-support#13",
+ input: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ expect: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ },
+ {
+ description: "matthewpi/vscode-caddyfile-support#13 - bad formatting",
+ input: `{
+ email {$ACMEEMAIL}
+ #debug
+ }
+
+ block {
+ }
+`,
+ expect: `{
+ email {$ACMEEMAIL}
+ #debug
+}
+
+block {
+}
+`,
+ },
+ {
+ description: "keep heredoc as-is",
+ input: `block {
+ heredoc < endIndex || endIndex > argCount {
+ caddy.Log().Named("caddyfile").Warn(
+ "Variadic placeholder "+token.Text+" indices are out of bounds, only "+strconv.Itoa(argCount)+" argument(s) exist",
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
+ return false, 0, 0
+ }
+ return true, startIndex, endIndex
+}
+
+// makeArgsReplacer prepares a Replacer which can replace
+// non-variadic args placeholders in imported tokens.
+func makeArgsReplacer(args []string) *caddy.Replacer {
+	repl := caddy.NewEmptyReplacer()
+	repl.Map(func(key string) (any, bool) {
+		// TODO: Remove the deprecated {args.*} placeholder
+		// support at some point in the future
+		if matches := argsRegexpIndexDeprecated.FindStringSubmatch(key); len(matches) > 0 {
+			// What's matched may be a substring of the key
+			if matches[0] != key {
+				return nil, false
+			}
+
+			value, err := strconv.Atoi(matches[1])
+			if err != nil {
+				caddy.Log().Named("caddyfile").Warn(
+					"Placeholder {args." + matches[1] + "} has an invalid index")
+				return nil, false
+			}
+			if value >= len(args) {
+				caddy.Log().Named("caddyfile").Warn(
+					"Placeholder {args." + matches[1] + "} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
+				return nil, false
+			}
+			caddy.Log().Named("caddyfile").Warn(
+				"Placeholder {args." + matches[1] + "} deprecated, use {args[" + matches[1] + "]} instead")
+			return args[value], true
+		}
+
+		// Handle args[*] form
+		if matches := argsRegexpIndex.FindStringSubmatch(key); len(matches) > 0 {
+			// What's matched may be a substring of the key
+			if matches[0] != key {
+				return nil, false
+			}
+
+			// a ':' indicates a range (variadic) placeholder, which this
+			// non-variadic replacer does not handle
+			if strings.Contains(matches[1], ":") {
+				caddy.Log().Named("caddyfile").Warn(
+					"Variadic placeholder {args[" + matches[1] + "]} must be a token on its own")
+				return nil, false
+			}
+			value, err := strconv.Atoi(matches[1])
+			if err != nil {
+				caddy.Log().Named("caddyfile").Warn(
+					"Placeholder {args[" + matches[1] + "]} has an invalid index")
+				return nil, false
+			}
+			if value >= len(args) {
+				caddy.Log().Named("caddyfile").Warn(
+					"Placeholder {args[" + matches[1] + "]} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
+				return nil, false
+			}
+			return args[value], true
+		}
+
+		// Not an args placeholder, ignore
+		return nil, false
+	})
+	return repl
+}
+
+// regexes matching the deprecated {args.N} and current {args[N]} forms
+var (
+	argsRegexpIndexDeprecated = regexp.MustCompile(`args\.(.+)`)
+	argsRegexpIndex           = regexp.MustCompile(`args\[(.+)]`)
+)
diff --git a/caddyconfig/caddyfile/importgraph.go b/caddyconfig/caddyfile/importgraph.go
new file mode 100644
index 00000000000..ca859299dce
--- /dev/null
+++ b/caddyconfig/caddyfile/importgraph.go
@@ -0,0 +1,126 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "fmt"
+ "slices"
+)
+
+// adjacency is an adjacency-list representation of directed edges.
+type adjacency map[string][]string
+
+// importGraph tracks which files/snippets import which others, so
+// that cycles of import directives can be detected and rejected.
+type importGraph struct {
+	nodes map[string]struct{} // set of known node names
+	edges adjacency           // directed edges between nodes
+}
+
+// addNode registers name as a vertex in the graph, lazily creating
+// the node set on first use. Adding an existing node is a no-op.
+func (i *importGraph) addNode(name string) {
+	if i.nodes == nil {
+		i.nodes = make(map[string]struct{})
+	}
+	if _, ok := i.nodes[name]; !ok {
+		i.nodes[name] = struct{}{}
+	}
+}
+
+// addNodes registers every one of the given names as a vertex.
+func (i *importGraph) addNodes(names []string) {
+	for idx := range names {
+		i.addNode(names[idx])
+	}
+}
+
+// removeNode deletes name from the node set; any edges referencing
+// it are left untouched.
+func (i *importGraph) removeNode(name string) {
+	delete(i.nodes, name)
+}
+
+// removeNodes deletes each of the given names from the node set.
+func (i *importGraph) removeNodes(names []string) {
+	for idx := range names {
+		i.removeNode(names[idx])
+	}
+}
+
+// addEdge records a directed edge from -> to. It returns an error if
+// either endpoint is not a known node, or if following existing edges
+// from 'to' would lead back to 'from' (i.e. the edge would close an
+// import cycle). Adding an edge that already exists is a no-op.
+func (i *importGraph) addEdge(from, to string) error {
+	if !i.exists(from) || !i.exists(to) {
+		return fmt.Errorf("one of the nodes does not exist")
+	}
+
+	if i.willCycle(to, from) {
+		return fmt.Errorf("a cycle of imports exists between %s and %s", from, to)
+	}
+
+	if i.areConnected(from, to) {
+		// if connected, there's nothing to do
+		return nil
+	}
+
+	// i.nodes is guaranteed non-nil here: both endpoints passed the
+	// exists() checks above, so only the edge map may need initializing
+	if i.edges == nil {
+		i.edges = make(adjacency)
+	}
+
+	i.edges[from] = append(i.edges[from], to)
+	return nil
+}
+
+// addEdges records edges from 'from' to every name in tos, stopping
+// at and returning the first error encountered.
+func (i *importGraph) addEdges(from string, tos []string) error {
+	for _, target := range tos {
+		if err := i.addEdge(from, target); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// areConnected reports whether a direct edge from -> to is recorded.
+func (i *importGraph) areConnected(from, to string) bool {
+	if neighbors, ok := i.edges[from]; ok {
+		return slices.Contains(neighbors, to)
+	}
+	return false
+}
+
+// willCycle reports whether node 'to' is reachable from node 'from'
+// by following existing edges. It performs a depth-first traversal
+// starting at 'from' and then checks whether 'to' was visited.
+func (i *importGraph) willCycle(from, to string) bool {
+	collector := make(map[string]bool)
+
+	var visit func(string)
+	visit = func(start string) {
+		if !collector[start] {
+			collector[start] = true
+			for _, v := range i.edges[start] {
+				visit(v)
+			}
+		}
+	}
+
+	for _, v := range i.edges[from] {
+		visit(v)
+	}
+
+	// a direct map lookup replaces the former linear scan
+	// over every visited node; behavior is identical
+	return collector[to]
+}
+
+// exists reports whether key is a known node in the graph.
+func (i *importGraph) exists(key string) bool {
+	_, exists := i.nodes[key]
+	return exists
+}
diff --git a/caddyconfig/caddyfile/lexer.go b/caddyconfig/caddyfile/lexer.go
new file mode 100644
index 00000000000..9b523f397ad
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer.go
@@ -0,0 +1,399 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+type (
+	// lexer is a utility which can get values, token by
+	// token, from a Reader. A token is a word, and tokens
+	// are separated by whitespace. A word can be enclosed
+	// in quotes if it contains whitespace.
+	lexer struct {
+		reader       *bufio.Reader // source of runes being lexed
+		token        Token         // most recently loaded token
+		line         int           // current line number (1-based)
+		skippedLines int           // lines consumed by escaped newlines/heredocs, not yet added to line
+	}
+
+	// Token represents a single parsable unit.
+	Token struct {
+		File          string   // name of the file the token came from
+		imports       []string // chain of import directives that produced this token
+		Line          int      // line number the token appears on
+		Text          string   // the token's literal text
+		wasQuoted     rune     // enclosing quote character, if any
+		heredocMarker string   // heredoc end marker, if token came from a heredoc
+		snippetName   string   // snippet the token was defined in, if any
+	}
+)
+
+// Tokenize takes bytes as input and lexes it into
+// a list of tokens that can be parsed as a Caddyfile.
+// Also takes a filename to fill the token's File as
+// the source of the tokens, which is important to
+// determine relative paths for `import` directives.
+func Tokenize(input []byte, filename string) ([]Token, error) {
+	var l lexer
+	if err := l.load(bytes.NewReader(input)); err != nil {
+		return nil, err
+	}
+	var tokens []Token
+	for {
+		ok, err := l.next()
+		if err != nil {
+			return nil, err
+		}
+		if !ok {
+			return tokens, nil
+		}
+		// stamp each token with its source file before collecting it
+		l.token.File = filename
+		tokens = append(tokens, l.token)
+	}
+}
+
+// load prepares the lexer to scan an input for tokens.
+// It discards any leading byte order mark.
+func (l *lexer) load(input io.Reader) error {
+	l.reader = bufio.NewReader(input)
+	l.line = 1
+
+	// peel off a byte order mark, if one is present
+	first, _, err := l.reader.ReadRune()
+	if err != nil {
+		return err
+	}
+	if first == 0xFEFF {
+		// BOM consumed; nothing to put back
+		return nil
+	}
+	return l.reader.UnreadRune()
+}
+
+// next loads the next token into the lexer.
+// A token is delimited by whitespace, unless
+// the token starts with a quotes character (")
+// in which case the token goes until the closing
+// quotes (the enclosing quotes are not included).
+// Inside quoted strings, quotes may be escaped
+// with a preceding \ character. No other chars
+// may be escaped. The rest of the line is skipped
+// if a "#" character is read in. Returns true if
+// a token was loaded; false otherwise.
+func (l *lexer) next() (bool, error) {
+	var val []rune
+	var comment, quoted, btQuoted, inHeredoc, heredocEscaped, escaped bool
+	var heredocMarker string
+
+	makeToken := func(quoted rune) bool {
+		l.token.Text = string(val)
+		l.token.wasQuoted = quoted
+		l.token.heredocMarker = heredocMarker
+		return true
+	}
+
+	for {
+		// Read a character in; if err then if we had
+		// read some characters, make a token. If we
+		// reached EOF, then no more tokens to read.
+		// If no EOF, then we had a problem.
+		ch, _, err := l.reader.ReadRune()
+		if err != nil {
+			if len(val) > 0 {
+				if inHeredoc {
+					return false, fmt.Errorf("incomplete heredoc <<%s on line #%d, expected ending marker %s", heredocMarker, l.line+l.skippedLines, heredocMarker)
+				}
+
+				return makeToken(0), nil
+			}
+			if err == io.EOF {
+				return false, nil
+			}
+			return false, err
+		}
+
+		// detect whether we have the start of a heredoc
+		if !(quoted || btQuoted) && !(inHeredoc || heredocEscaped) &&
+			len(val) > 1 && string(val[:2]) == "<<" {
+			// a space means it's just a regular token and not a heredoc
+			if ch == ' ' {
+				return makeToken(0), nil
+			}
+
+			// skip CR, we only care about LF
+			if ch == '\r' {
+				continue
+			}
+
+			// after hitting a newline, we know that the heredoc marker
+			// is the characters after the two << and the newline.
+			// we reset the val because the heredoc is syntax we don't
+			// want to keep.
+			if ch == '\n' {
+				if len(val) == 2 {
+					return false, fmt.Errorf("missing opening heredoc marker on line #%d; must contain only alpha-numeric characters, dashes and underscores; got empty string", l.line)
+				}
+
+				// check if there's too many <
+				if string(val[:3]) == "<<<" {
+					return false, fmt.Errorf("too many '<' for heredoc on line #%d; only use two, for example <<END", l.line)
+				}
+
+				// the marker is everything after the leading <<;
+				// validate it against the allowed character set
+				heredocMarker = string(val[2:])
+				if !heredocMarkerRegexp.Match([]byte(heredocMarker)) {
+					return false, fmt.Errorf("heredoc marker on line #%d must contain only alpha-numeric characters, dashes and underscores; got '%s'", l.line, heredocMarker)
+				}
+
+				// we're now inside the heredoc; the marker syntax
+				// itself is discarded from the token value
+				inHeredoc = true
+				l.skippedLines++
+				val = nil
+				continue
+			}
+
+			// otherwise we're still accumulating the marker characters
+			val = append(val, ch)
+			continue
+		}
+
+		// if we're in a heredoc, collect all characters as-is until
+		// the value ends with the closing marker
+		if inHeredoc {
+			val = append(val, ch)
+			if ch == '\n' {
+				l.skippedLines++
+			}
+
+			// check whether the last characters read are the ending marker
+			if len(val) >= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) {
+				// set the final value
+				val, err = l.finalizeHeredoc(val, heredocMarker)
+				if err != nil {
+					return false, err
+				}
+
+				// set the line counter, and make the token
+				l.line += l.skippedLines
+				l.skippedLines = 0
+				return makeToken('<'), nil
+			}
+
+			// stay in the heredoc until we find the ending marker
+			continue
+		}
+
+		// track whether we found an escape '\' for the next
+		// iteration to be contextually aware
+		if !escaped && !btQuoted && ch == '\\' {
+			escaped = true
+			continue
+		}
+
+		if quoted || btQuoted {
+			if quoted && escaped {
+				// all is literal in quoted area,
+				// so only escape quotes
+				if ch != '"' {
+					val = append(val, '\\')
+				}
+				escaped = false
+			} else {
+				if (quoted && ch == '"') || (btQuoted && ch == '`') {
+					return makeToken(ch), nil
+				}
+			}
+			// allow quoted text to wrap continue on multiple lines
+			if ch == '\n' {
+				l.line += 1 + l.skippedLines
+				l.skippedLines = 0
+			}
+			// collect this character as part of the quoted token
+			val = append(val, ch)
+			continue
+		}
+
+		if unicode.IsSpace(ch) {
+			// ignore CR altogether, we only actually care about LF (\n)
+			if ch == '\r' {
+				continue
+			}
+			// end of the line
+			if ch == '\n' {
+				// newlines can be escaped to chain arguments
+				// onto multiple lines; else, increment the line count
+				if escaped {
+					l.skippedLines++
+					escaped = false
+				} else {
+					l.line += 1 + l.skippedLines
+					l.skippedLines = 0
+				}
+				// comments (#) are single-line only
+				comment = false
+			}
+			// any kind of space means we're at the end of this token
+			if len(val) > 0 {
+				return makeToken(0), nil
+			}
+			continue
+		}
+
+		// comments must be at the start of a token,
+		// in other words, preceded by space or newline
+		if ch == '#' && len(val) == 0 {
+			comment = true
+		}
+		if comment {
+			continue
+		}
+
+		if len(val) == 0 {
+			l.token = Token{Line: l.line}
+			if ch == '"' {
+				quoted = true
+				continue
+			}
+			if ch == '`' {
+				btQuoted = true
+				continue
+			}
+		}
+
+		if escaped {
+			// allow escaping the first < to skip the heredoc syntax
+			if ch == '<' {
+				heredocEscaped = true
+			} else {
+				val = append(val, '\\')
+			}
+			escaped = false
+		}
+
+		val = append(val, ch)
+	}
+}
+
+// finalizeHeredoc takes the runes read as the heredoc text and the marker,
+// and processes the text to strip leading whitespace, returning the final
+// value without the leading whitespace. It returns an error if a content
+// line does not begin with the exact whitespace that precedes the closing
+// marker, since stripping would then be ambiguous.
+func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
+	stringVal := string(val)
+
+	// find the last newline of the heredoc, which is where the contents end
+	lastNewline := strings.LastIndex(stringVal, "\n")
+
+	// collapse the content, then split into separate lines
+	lines := strings.Split(stringVal[:lastNewline+1], "\n")
+
+	// figure out how much whitespace we need to strip from the front of every line
+	// by getting the string that precedes the marker, on the last line
+	paddingToStrip := stringVal[lastNewline+1 : len(stringVal)-len(marker)]
+
+	// iterate over each line and strip the whitespace from the front
+	var out string
+	for lineNum, lineText := range lines[:len(lines)-1] {
+		// blank lines (possibly carrying a stray CR) pass through as bare newlines
+		if lineText == "" || lineText == "\r" {
+			out += "\n"
+			continue
+		}
+
+		// find an exact match for the padding
+		index := strings.Index(lineText, paddingToStrip)
+
+		// if the padding doesn't match exactly at the start then we can't safely strip
+		if index != 0 {
+			return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, lineText, paddingToStrip)
+		}
+
+		// strip, then append the line, with the newline, to the output.
+		// also removes all "\r" because Windows.
+		out += strings.ReplaceAll(lineText[len(paddingToStrip):]+"\n", "\r", "")
+	}
+
+	// Remove the trailing newline from the loop
+	if len(out) > 0 && out[len(out)-1] == '\n' {
+		out = out[:len(out)-1]
+	}
+
+	// return the final value
+	return []rune(out), nil
+}
+
+// Quoted returns true if the token was enclosed in quotes
+// (i.e. double quotes, backticks, or heredoc).
+func (t Token) Quoted() bool {
+	wasEnclosed := t.wasQuoted > 0
+	return wasEnclosed
+}
+
+// NumLineBreaks counts how many line breaks are in the token text.
+func (t Token) NumLineBreaks() int {
+	if t.wasQuoted == '<' {
+		// heredocs have an extra linebreak because the opening
+		// delimiter is on its own line and is not included in the
+		// token Text itself, and the trailing newline is removed.
+		return strings.Count(t.Text, "\n") + 2
+	}
+	return strings.Count(t.Text, "\n")
+}
+
+// Clone returns a deep copy of the token.
+func (t Token) Clone() Token {
+	clone := t
+	// imports is the only reference-typed field; replace it with a
+	// fresh slice so the clone shares no backing storage with t
+	clone.imports = append([]string{}, t.imports...)
+	return clone
+}
+
+// heredocMarkerRegexp constrains heredoc markers to alpha-numeric characters, dashes and underscores.
+var heredocMarkerRegexp = regexp.MustCompile("^[A-Za-z0-9_-]+$")
+
+// isNextOnNewLine tests whether t2 is on a different line from t1
+func isNextOnNewLine(t1, t2 Token) bool {
+	// tokens from different files are trivially on different lines
+	if t1.File != t2.File {
+		return true
+	}
+
+	// tokens from different import chains are treated the same way
+	if len(t1.imports) != len(t2.imports) {
+		return true
+	}
+	for idx := range t1.imports {
+		if t1.imports[idx] != t2.imports[idx] {
+			return true
+		}
+	}
+
+	// otherwise, compare where t1 ends (accounting for its own
+	// line breaks) against the line where t2 begins
+	return t1.Line+t1.NumLineBreaks() < t2.Line
+}
diff --git a/caddyconfig/caddyfile/lexer_fuzz.go b/caddyconfig/caddyfile/lexer_fuzz.go
new file mode 100644
index 00000000000..6f75694b5a0
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer_fuzz.go
@@ -0,0 +1,28 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package caddyfile
+
+// FuzzTokenize is a go-fuzz entry point that exercises the lexer.
+func FuzzTokenize(input []byte) int {
+	tokens, err := Tokenize(input, "Caddyfile")
+	switch {
+	case err != nil:
+		return 0
+	case len(tokens) == 0:
+		return -1
+	default:
+		return 1
+	}
+}
diff --git a/caddyconfig/caddyfile/lexer_test.go b/caddyconfig/caddyfile/lexer_test.go
new file mode 100644
index 00000000000..7389af79b40
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer_test.go
@@ -0,0 +1,541 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "testing"
+)
+
+func TestLexer(t *testing.T) {
+ testCases := []struct {
+ input []byte
+ expected []Token
+ expectErr bool
+ errorMessage string
+ }{
+ {
+ input: []byte(`host:123`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ },
+ },
+ {
+ input: []byte(`host:123
+
+ directive`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 3, Text: "directive"},
+ },
+ },
+ {
+ input: []byte(`host:123 {
+ directive
+ }`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 2, Text: "directive"},
+ {Line: 3, Text: "}"},
+ },
+ },
+ {
+ input: []byte(`host:123 { directive }`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 1, Text: "directive"},
+ {Line: 1, Text: "}"},
+ },
+ },
+ {
+ input: []byte(`host:123 {
+ #comment
+ directive
+ # comment
+ foobar # another comment
+ }`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 3, Text: "directive"},
+ {Line: 5, Text: "foobar"},
+ {Line: 6, Text: "}"},
+ },
+ },
+ {
+ input: []byte(`host:123 {
+ # hash inside string is not a comment
+ redir / /some/#/path
+ }`),
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 3, Text: "redir"},
+ {Line: 3, Text: "/"},
+ {Line: 3, Text: "/some/#/path"},
+ {Line: 4, Text: "}"},
+ },
+ },
+ {
+ input: []byte("# comment at beginning of file\n# comment at beginning of line\nhost:123"),
+ expected: []Token{
+ {Line: 3, Text: "host:123"},
+ },
+ },
+ {
+ input: []byte(`a "quoted value" b
+ foobar`),
+ expected: []Token{
+ {Line: 1, Text: "a"},
+ {Line: 1, Text: "quoted value"},
+ {Line: 1, Text: "b"},
+ {Line: 2, Text: "foobar"},
+ },
+ },
+ {
+ input: []byte(`A "quoted \"value\" inside" B`),
+ expected: []Token{
+ {Line: 1, Text: "A"},
+ {Line: 1, Text: `quoted "value" inside`},
+ {Line: 1, Text: "B"},
+ },
+ },
+ {
+ input: []byte("An escaped \"newline\\\ninside\" quotes"),
+ expected: []Token{
+ {Line: 1, Text: "An"},
+ {Line: 1, Text: "escaped"},
+ {Line: 1, Text: "newline\\\ninside"},
+ {Line: 2, Text: "quotes"},
+ },
+ },
+ {
+ input: []byte("An escaped newline\\\noutside quotes"),
+ expected: []Token{
+ {Line: 1, Text: "An"},
+ {Line: 1, Text: "escaped"},
+ {Line: 1, Text: "newline"},
+ {Line: 1, Text: "outside"},
+ {Line: 1, Text: "quotes"},
+ },
+ },
+ {
+ input: []byte("line1\\\nescaped\nline2\nline3"),
+ expected: []Token{
+ {Line: 1, Text: "line1"},
+ {Line: 1, Text: "escaped"},
+ {Line: 3, Text: "line2"},
+ {Line: 4, Text: "line3"},
+ },
+ },
+ {
+ input: []byte("line1\\\nescaped1\\\nescaped2\nline4\nline5"),
+ expected: []Token{
+ {Line: 1, Text: "line1"},
+ {Line: 1, Text: "escaped1"},
+ {Line: 1, Text: "escaped2"},
+ {Line: 4, Text: "line4"},
+ {Line: 5, Text: "line5"},
+ },
+ },
+ {
+ input: []byte(`"unescapable\ in quotes"`),
+ expected: []Token{
+ {Line: 1, Text: `unescapable\ in quotes`},
+ },
+ },
+ {
+ input: []byte(`"don't\escape"`),
+ expected: []Token{
+ {Line: 1, Text: `don't\escape`},
+ },
+ },
+ {
+ input: []byte(`"don't\\escape"`),
+ expected: []Token{
+ {Line: 1, Text: `don't\\escape`},
+ },
+ },
+ {
+ input: []byte(`un\escapable`),
+ expected: []Token{
+ {Line: 1, Text: `un\escapable`},
+ },
+ },
+ {
+ input: []byte(`A "quoted value with line
+ break inside" {
+ foobar
+ }`),
+ expected: []Token{
+ {Line: 1, Text: "A"},
+ {Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"},
+ {Line: 2, Text: "{"},
+ {Line: 3, Text: "foobar"},
+ {Line: 4, Text: "}"},
+ },
+ },
+ {
+ input: []byte(`"C:\php\php-cgi.exe"`),
+ expected: []Token{
+ {Line: 1, Text: `C:\php\php-cgi.exe`},
+ },
+ },
+ {
+ input: []byte(`empty "" string`),
+ expected: []Token{
+ {Line: 1, Text: `empty`},
+ {Line: 1, Text: ``},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: []byte("skip those\r\nCR characters"),
+ expected: []Token{
+ {Line: 1, Text: "skip"},
+ {Line: 1, Text: "those"},
+ {Line: 2, Text: "CR"},
+ {Line: 2, Text: "characters"},
+ },
+ },
+ {
+ input: []byte("\xEF\xBB\xBF:8080"), // test with leading byte order mark
+ expected: []Token{
+ {Line: 1, Text: ":8080"},
+ },
+ },
+ {
+ input: []byte("simple `backtick quoted` string"),
+ expected: []Token{
+ {Line: 1, Text: `simple`},
+ {Line: 1, Text: `backtick quoted`},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: []byte("multiline `backtick\nquoted\n` string"),
+ expected: []Token{
+ {Line: 1, Text: `multiline`},
+ {Line: 1, Text: "backtick\nquoted\n"},
+ {Line: 3, Text: `string`},
+ },
+ },
+ {
+ input: []byte("nested `\"quotes inside\" backticks` string"),
+ expected: []Token{
+ {Line: 1, Text: `nested`},
+ {Line: 1, Text: `"quotes inside" backticks`},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: []byte("reverse-nested \"`backticks` inside\" quotes"),
+ expected: []Token{
+ {Line: 1, Text: `reverse-nested`},
+ {Line: 1, Text: "`backticks` inside"},
+ {Line: 1, Text: `quotes`},
+ },
+ },
+ {
+ input: []byte(`heredoc <>`),
+ expected: []Token{
+ {Line: 1, Text: `escaped-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc >"`),
+ expected: []Token{
+ {Line: 1, Text: `not-a-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc << >>`),
+ expected: []Token{
+ {Line: 1, Text: `not-a-heredoc`},
+ {Line: 1, Text: `<<`},
+ {Line: 1, Text: `>>`},
+ },
+ },
+ {
+ input: []byte(`not-a-heredoc < 0 || len(p.block.Segments) > 0 {
+ blocks = append(blocks, p.block)
+ }
+ if p.nesting > 0 {
+ return blocks, p.EOFErr()
+ }
+ }
+
+ return blocks, nil
+}
+
+// parseOne resets p.block and parses a single server block into it.
+func (p *parser) parseOne() error {
+	p.block = ServerBlock{}
+	return p.begin()
+}
+
+// begin parses one top-level unit of the Caddyfile: a line of site
+// addresses (optionally followed by a block), a named route, or a
+// snippet definition.
+func (p *parser) begin() error {
+	if len(p.tokens) == 0 {
+		return nil
+	}
+
+	err := p.addresses()
+	if err != nil {
+		return err
+	}
+
+	if p.eof {
+		// this happens if the Caddyfile consists of only
+		// a line of addresses and nothing else
+		return nil
+	}
+
+	if ok, name := p.isNamedRoute(); ok {
+		// we just need a dummy leading token to ease parsing later
+		nameToken := p.Token()
+		nameToken.Text = name
+
+		// named routes only have one key, the route name
+		p.block.Keys = []Token{nameToken}
+		p.block.IsNamedRoute = true
+
+		// get all the tokens from the block, including the braces
+		tokens, err := p.blockTokens(true)
+		if err != nil {
+			return err
+		}
+		tokens = append([]Token{nameToken}, tokens...)
+		p.block.Segments = []Segment{tokens}
+		return nil
+	}
+
+	if ok, name := p.isSnippet(); ok {
+		if p.definedSnippets == nil {
+			p.definedSnippets = map[string][]Token{}
+		}
+		if _, found := p.definedSnippets[name]; found {
+			return p.Errf("redeclaration of previously declared snippet %s", name)
+		}
+		// consume all tokens til matched close brace
+		tokens, err := p.blockTokens(false)
+		if err != nil {
+			return err
+		}
+		// Just as we need to track which file the token comes from, we need to
+		// keep track of which snippet the token comes from. This is helpful
+		// in tracking import cycles across files/snippets by namespacing them.
+		// Without this, we end up with false-positives in cycle-detection.
+		for k, v := range tokens {
+			v.snippetName = name
+			tokens[k] = v
+		}
+		p.definedSnippets[name] = tokens
+		// empty block keys so we don't save this block as a real server.
+		p.block.Keys = nil
+		return nil
+	}
+
+	// otherwise, this is an ordinary server block
+	return p.blockContents()
+}
+
+// addresses parses the site addresses that begin a server block,
+// consuming tokens until an opening curly brace or the end of the
+// address line. It rejects globally-defined request matchers, commas
+// fused inside an address, and braces fused onto an address.
+func (p *parser) addresses() error {
+	var expectingAnother bool
+
+	for {
+		value := p.Val()
+		token := p.Token()
+
+		// Reject request matchers if trying to define them globally
+		if strings.HasPrefix(value, "@") {
+			return p.Errf("request matchers may not be defined globally, they must be in a site block; found %s", value)
+		}
+
+		// Special case: import directive replaces tokens during parse-time
+		if value == "import" && p.isNewLine() {
+			err := p.doImport(0)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		// Open brace definitely indicates end of addresses
+		if value == "{" {
+			if expectingAnother {
+				return p.Errf("Expected another address but had '%s' - check for extra comma", value)
+			}
+			// Mark this server block as being defined with braces.
+			// This is used to provide a better error message when
+			// the user may have tried to define two server blocks
+			// without having used braces, which are required in
+			// that case.
+			p.block.HasBraces = true
+			break
+		}
+
+		// Users commonly forget to place a space between the address and the '{'
+		if strings.HasSuffix(value, "{") {
+			return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value)
+		}
+
+		if value != "" { // empty token possible if user typed ""
+			// Trailing comma indicates another address will follow, which
+			// may possibly be on the next line
+			if value[len(value)-1] == ',' {
+				value = value[:len(value)-1]
+				expectingAnother = true
+			} else {
+				expectingAnother = false // but we may still see another one on this line
+			}
+
+			// If there's a comma here, it's probably because they didn't use a space
+			// between their two domains, e.g. "foo.com,bar.com", which would not be
+			// parsed as two separate site addresses.
+			if strings.Contains(value, ",") {
+				return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value)
+			}
+
+			// After the above, a comma surrounded by spaces would result
+			// in an empty token which we should ignore
+			if value != "" {
+				// Add the token as a site address
+				token.Text = value
+				p.block.Keys = append(p.block.Keys, token)
+			}
+		}
+
+		// Advance token and possibly break out of loop or return error
+		hasNext := p.Next()
+		if expectingAnother && !hasNext {
+			return p.EOFErr()
+		}
+		if !hasNext {
+			p.eof = true
+			break // EOF
+		}
+		if !expectingAnother && p.isNewLine() {
+			break
+		}
+	}
+
+	return nil
+}
+
+// blockContents parses the directives of a server block, which may
+// or may not be enclosed in curly braces.
+func (p *parser) blockContents() error {
+	errOpenCurlyBrace := p.openCurlyBrace()
+	if errOpenCurlyBrace != nil {
+		// single-server configs don't need curly braces
+		p.cursor--
+	}
+
+	if err := p.directives(); err != nil {
+		return err
+	}
+
+	// a closing brace is expected only if an opening one was found
+	if errOpenCurlyBrace == nil {
+		return p.closeCurlyBrace()
+	}
+	return nil
+}
+
+// directives parses through all the lines for directives
+// and it expects the next token to be the first
+// directive. It goes until EOF or closing curly brace
+// which ends the server block.
+func (p *parser) directives() error {
+	for p.Next() {
+		switch p.Val() {
+		case "}":
+			// end of server block; p.nesting has already been decremented
+			return nil
+		case "import":
+			// special case: import directive replaces tokens during parse-time
+			if err := p.doImport(1); err != nil {
+				return err
+			}
+			p.cursor-- // cursor is advanced when we continue, so roll back one more
+			continue
+		}
+
+		// normal case: parse a directive as a new segment
+		// (a "segment" is a line which starts with a directive
+		// and which ends at the end of the line or at the end of
+		// the block that is opened at the end of the line)
+		if err := p.directive(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// doImport swaps out the import directive and its argument
+// (a total of 2 tokens) with the tokens in the specified file
+// or globbing pattern. When the function returns, the cursor
+// is on the token before where the import directive was. In
+// other words, call Next() to access the first token that was
+// imported.
+func (p *parser) doImport(nesting int) error {
+ // syntax checks
+ if !p.NextArg() {
+ return p.ArgErr()
+ }
+ importPattern := p.Val()
+ if importPattern == "" {
+ return p.Err("Import requires a non-empty filepath")
+ }
+
+ // grab remaining args as placeholder replacements
+ args := p.RemainingArgs()
+
+ // set up a replacer for non-variadic args replacement
+ repl := makeArgsReplacer(args)
+
+ // grab all the tokens (if it exists) from within a block that follows the import
+ var blockTokens []Token
+ for currentNesting := p.Nesting(); p.NextBlock(currentNesting); {
+ blockTokens = append(blockTokens, p.Token())
+ }
+ // initialize with size 1
+ blockMapping := make(map[string][]Token, 1)
+ if len(blockTokens) > 0 {
+ // use such tokens to create a new dispenser, and then use it to parse each block
+ bd := NewDispenser(blockTokens)
+ for bd.Next() {
+ // see if we can grab a key
+ var currentMappingKey string
+ if bd.Val() == "{" {
+ return p.Err("anonymous blocks are not supported")
+ }
+ currentMappingKey = bd.Val()
+ currentMappingTokens := []Token{}
+ // read all args until end of line / {
+ if bd.NextArg() {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ for bd.NextArg() {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ }
+ // TODO(elee1766): we don't enter another mapping here because it's annoying to extract the { and } properly.
+ // maybe someone can do that in the future
+ } else {
+ // attempt to enter a block and add tokens to the currentMappingTokens
+ for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
+ currentMappingTokens = append(currentMappingTokens, bd.Token())
+ }
+ }
+ blockMapping[currentMappingKey] = currentMappingTokens
+ }
+ }
+
+ // splice out the import directive and its arguments
+ // (2 tokens, plus the length of args)
+ tokensBefore := p.tokens[:p.cursor-1-len(args)-len(blockTokens)]
+ tokensAfter := p.tokens[p.cursor+1:]
+ var importedTokens []Token
+ var nodes []string
+
+ // first check snippets. That is a simple, non-recursive replacement
+ if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
+ importedTokens = p.definedSnippets[importPattern]
+ if len(importedTokens) > 0 {
+ // just grab the first one
+ nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
+ }
+ } else {
+ // make path relative to the file of the _token_ being processed rather
+ // than current working directory (issue #867) and then use glob to get
+ // list of matching filenames
+ absFile, err := caddy.FastAbs(p.Dispenser.File())
+ if err != nil {
+ return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
+ }
+
+ var matches []string
+ var globPattern string
+ if !filepath.IsAbs(importPattern) {
+ globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
+ } else {
+ globPattern = importPattern
+ }
+ if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
+ (strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
+ // See issue #2096 - a pattern with many glob expansions can hang for too long
+ return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
+ }
+ matches, err = filepath.Glob(globPattern)
+ if err != nil {
+ return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
+ }
+ if len(matches) == 0 {
+ if strings.ContainsAny(globPattern, "*?[]") {
+ caddy.Log().Warn("No files matching import glob pattern", zap.String("pattern", importPattern))
+ } else {
+ return p.Errf("File to import not found: %s", importPattern)
+ }
+ } else {
+ // See issue #5295 - should skip any files that start with a . when iterating over them.
+ sep := string(filepath.Separator)
+ segGlobPattern := strings.Split(globPattern, sep)
+ if strings.HasPrefix(segGlobPattern[len(segGlobPattern)-1], "*") {
+ var tmpMatches []string
+ for _, m := range matches {
+ seg := strings.Split(m, sep)
+ if !strings.HasPrefix(seg[len(seg)-1], ".") {
+ tmpMatches = append(tmpMatches, m)
+ }
+ }
+ matches = tmpMatches
+ }
+ }
+
+ // collect all the imported tokens
+ for _, importFile := range matches {
+ newTokens, err := p.doSingleImport(importFile)
+ if err != nil {
+ return err
+ }
+ importedTokens = append(importedTokens, newTokens...)
+ }
+ nodes = matches
+ }
+
+ nodeName := p.File()
+ if p.Token().snippetName != "" {
+ nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
+ }
+ p.importGraph.addNode(nodeName)
+ p.importGraph.addNodes(nodes)
+ if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
+ p.importGraph.removeNodes(nodes)
+ return err
+ }
+
+ // copy the tokens so we don't overwrite p.definedSnippets
+ tokensCopy := make([]Token, 0, len(importedTokens))
+
+ var (
+ maybeSnippet bool
+ maybeSnippetId bool
+ index int
+ )
+
+ // run the argument replacer on the tokens
+ // golang for range slice return a copy of value
+ // similarly, append also copy value
+ for i, token := range importedTokens {
+ // update the token's imports to refer to import directive filename, line number and snippet name if there is one
+ if token.snippetName != "" {
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
+ } else {
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
+ }
+
+ // naive way of determine snippets, as snippets definition can only follow name + block
+ // format, won't check for nesting correctness or any other error, that's what parser does.
+ if !maybeSnippet && nesting == 0 {
+ // first of the line
+ if i == 0 || isNextOnNewLine(tokensCopy[i-1], token) {
+ index = 0
+ } else {
+ index++
+ }
+
+ if index == 0 && len(token.Text) >= 3 && strings.HasPrefix(token.Text, "(") && strings.HasSuffix(token.Text, ")") {
+ maybeSnippetId = true
+ }
+ }
+
+ switch token.Text {
+ case "{":
+ nesting++
+ if index == 1 && maybeSnippetId && nesting == 1 {
+ maybeSnippet = true
+ maybeSnippetId = false
+ }
+ case "}":
+ nesting--
+ if nesting == 0 && maybeSnippet {
+ maybeSnippet = false
+ }
+ }
+ // if it is {block}, we substitute with all tokens in the block
+ // if it is {blocks.*}, we substitute with the tokens in the mapping for the *
+ var skip bool
+ var tokensToAdd []Token
+ switch {
+ case token.Text == "{block}":
+ tokensToAdd = blockTokens
+ case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
+ // {blocks.foo.bar} will be extracted to key `foo.bar`
+ blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
+ val, ok := blockMapping[blockKey]
+ if ok {
+ tokensToAdd = val
+ }
+ default:
+ skip = true
+ }
+ if !skip {
+ if len(tokensToAdd) == 0 {
+ // if there is no content in the snippet block, don't do any replacement
+ // this allows snippets which contained {block}/{block.*} before this change to continue functioning as normal
+ tokensCopy = append(tokensCopy, token)
+ } else {
+ tokensCopy = append(tokensCopy, tokensToAdd...)
+ }
+ continue
+ }
+
+ if maybeSnippet {
+ tokensCopy = append(tokensCopy, token)
+ continue
+ }
+
+ foundVariadic, startIndex, endIndex := parseVariadic(token, len(args))
+ if foundVariadic {
+ for _, arg := range args[startIndex:endIndex] {
+ token.Text = arg
+ tokensCopy = append(tokensCopy, token)
+ }
+ } else {
+ token.Text = repl.ReplaceKnown(token.Text, "")
+ tokensCopy = append(tokensCopy, token)
+ }
+ }
+
+ // splice the imported tokens in the place of the import statement
+ // and rewind cursor so Next() will land on first imported token
+ p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
+ p.cursor -= len(args) + len(blockTokens) + 1
+
+ return nil
+}
+
+// doSingleImport lexes the individual file at importFile and returns
+// its tokens or an error, if any.
+func (p *parser) doSingleImport(importFile string) ([]Token, error) {
+ file, err := os.Open(importFile)
+ if err != nil {
+ return nil, p.Errf("Could not import %s: %v", importFile, err)
+ }
+ defer file.Close()
+
+ if info, err := file.Stat(); err != nil {
+ return nil, p.Errf("Could not import %s: %v", importFile, err)
+ } else if info.IsDir() {
+ return nil, p.Errf("Could not import %s: is a directory", importFile)
+ }
+
+ input, err := io.ReadAll(file)
+ if err != nil {
+ return nil, p.Errf("Could not read imported file %s: %v", importFile, err)
+ }
+
+ // only warning in case of empty files
+ if len(input) == 0 || len(strings.TrimSpace(string(input))) == 0 {
+ caddy.Log().Warn("Import file is empty", zap.String("file", importFile))
+ return []Token{}, nil
+ }
+
+ importedTokens, err := allTokens(importFile, input)
+ if err != nil {
+ return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
+ }
+
+ // Tack the file path onto these tokens so errors show the imported file's name
+ // (we use full, absolute path to avoid bugs: issue #1892)
+ filename, err := caddy.FastAbs(importFile)
+ if err != nil {
+ return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err)
+ }
+ for i := 0; i < len(importedTokens); i++ {
+ importedTokens[i].File = filename
+ }
+
+ return importedTokens, nil
+}
+
+// directive collects tokens until the directive's scope
+// closes (either end of line or end of curly brace block).
+// It expects the currently-loaded token to be a directive
+// (or } that ends a server block). The collected tokens
+// are loaded into the current server block for later use
+// by directive setup functions.
+func (p *parser) directive() error {
+ // a segment is a list of tokens associated with this directive
+ var segment Segment
+
+ // the directive itself is appended as a relevant token
+ segment = append(segment, p.Token())
+
+ for p.Next() {
+ if p.Val() == "{" {
+ p.nesting++
+ if !p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
+ return p.Err("Unexpected next token after '{' on same line")
+ }
+ if p.isNewLine() {
+ return p.Err("Unexpected '{' on a new line; did you mean to place the '{' on the previous line?")
+ }
+ } else if p.Val() == "{}" {
+ if p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
+ return p.Err("Unexpected '{}' at end of line")
+ }
+ } else if p.isNewLine() && p.nesting == 0 {
+ p.cursor-- // read too far
+ break
+ } else if p.Val() == "}" && p.nesting > 0 {
+ p.nesting--
+ } else if p.Val() == "}" && p.nesting == 0 {
+ return p.Err("Unexpected '}' because no matching opening brace")
+ } else if p.Val() == "import" && p.isNewLine() {
+ if err := p.doImport(1); err != nil {
+ return err
+ }
+ p.cursor-- // cursor is advanced when we continue, so roll back one more
+ continue
+ }
+
+ segment = append(segment, p.Token())
+ }
+
+ p.block.Segments = append(p.block.Segments, segment)
+
+ if p.nesting > 0 {
+ return p.EOFErr()
+ }
+
+ return nil
+}
+
+// openCurlyBrace expects the current token to be an
+// opening curly brace. This acts like an assertion
+// because it returns an error if the token is not
+// a opening curly brace. It does NOT advance the token.
+func (p *parser) openCurlyBrace() error {
+ if p.Val() != "{" {
+ return p.SyntaxErr("{")
+ }
+ return nil
+}
+
+// closeCurlyBrace expects the current token to be
+// a closing curly brace. This acts like an assertion
+// because it returns an error if the token is not
+// a closing curly brace. It does NOT advance the token.
+func (p *parser) closeCurlyBrace() error {
+ if p.Val() != "}" {
+ return p.SyntaxErr("}")
+ }
+ return nil
+}
+
+func (p *parser) isNamedRoute() (bool, string) {
+ keys := p.block.Keys
+ // A named route block is a single key with parens, prefixed with &.
+ if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
+ return true, strings.TrimSuffix(keys[0].Text[2:], ")")
+ }
+ return false, ""
+}
+
+func (p *parser) isSnippet() (bool, string) {
+ keys := p.block.Keys
+ // A snippet block is a single key with parens. Nothing else qualifies.
+ if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
+ return true, strings.TrimSuffix(keys[0].Text[1:], ")")
+ }
+ return false, ""
+}
+
+// read and store everything in a block for later replay.
+func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
+ // block must have curlies.
+ err := p.openCurlyBrace()
+ if err != nil {
+ return nil, err
+ }
+ nesting := 1 // count our own nesting
+ tokens := []Token{}
+ if retainCurlies {
+ tokens = append(tokens, p.Token())
+ }
+ for p.Next() {
+ if p.Val() == "}" {
+ nesting--
+ if nesting == 0 {
+ if retainCurlies {
+ tokens = append(tokens, p.Token())
+ }
+ break
+ }
+ }
+ if p.Val() == "{" {
+ nesting++
+ }
+ tokens = append(tokens, p.tokens[p.cursor])
+ }
+ // make sure we're matched up
+ if nesting != 0 {
+ return nil, p.SyntaxErr("}")
+ }
+ return tokens, nil
+}
+
+// ServerBlock associates any number of keys from the
+// head of the server block with tokens, which are
+// grouped by segments.
+type ServerBlock struct {
+ HasBraces bool
+ Keys []Token
+ Segments []Segment
+ IsNamedRoute bool
+}
+
+func (sb ServerBlock) GetKeysText() []string {
+ res := []string{}
+ for _, k := range sb.Keys {
+ res = append(res, k.Text)
+ }
+ return res
+}
+
+// DispenseDirective returns a dispenser that contains
+// all the tokens in the server block.
+func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
+ var tokens []Token
+ for _, seg := range sb.Segments {
+ if len(seg) > 0 && seg[0].Text == dir {
+ tokens = append(tokens, seg...)
+ }
+ }
+ return NewDispenser(tokens)
+}
+
+// Segment is a list of tokens which begins with a directive
+// and ends at the end of the directive (either at the end of
+// the line, or at the end of a block it opens).
+type Segment []Token
+
+// Directive returns the directive name for the segment.
+// The directive name is the text of the first token.
+func (s Segment) Directive() string {
+ if len(s) > 0 {
+ return s[0].Text
+ }
+ return ""
+}
+
+// spanOpen and spanClose are used to bound spans that
+// contain the name of an environment variable.
+var (
+ spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'}
+ envVarDefaultDelimiter = ":"
+)
diff --git a/caddyconfig/caddyfile/parse_test.go b/caddyconfig/caddyfile/parse_test.go
new file mode 100644
index 00000000000..d3fada4e026
--- /dev/null
+++ b/caddyconfig/caddyfile/parse_test.go
@@ -0,0 +1,889 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestParseVariadic(t *testing.T) {
+ args := make([]string, 10)
+ for i, tc := range []struct {
+ input string
+ result bool
+ }{
+ {
+ input: "",
+ result: false,
+ },
+ {
+ input: "{args[1",
+ result: false,
+ },
+ {
+ input: "1]}",
+ result: false,
+ },
+ {
+ input: "{args[:]}aaaaa",
+ result: false,
+ },
+ {
+ input: "aaaaa{args[:]}",
+ result: false,
+ },
+ {
+ input: "{args.}",
+ result: false,
+ },
+ {
+ input: "{args.1}",
+ result: false,
+ },
+ {
+ input: "{args[]}",
+ result: false,
+ },
+ {
+ input: "{args[:]}",
+ result: true,
+ },
+ {
+ input: "{args[:]}",
+ result: true,
+ },
+ {
+ input: "{args[0:]}",
+ result: true,
+ },
+ {
+ input: "{args[:0]}",
+ result: true,
+ },
+ {
+ input: "{args[-1:]}",
+ result: false,
+ },
+ {
+ input: "{args[:11]}",
+ result: false,
+ },
+ {
+ input: "{args[10:0]}",
+ result: false,
+ },
+ {
+ input: "{args[0:10]}",
+ result: true,
+ },
+ {
+ input: "{args[0]}:{args[1]}:{args[2]}",
+ result: false,
+ },
+ } {
+ token := Token{
+ File: "test",
+ Line: 1,
+ Text: tc.input,
+ }
+ if v, _, _ := parseVariadic(token, len(args)); v != tc.result {
+ t.Errorf("Test %d error expectation failed Expected: %t, got %t", i, tc.result, v)
+ }
+ }
+}
+
+func TestAllTokens(t *testing.T) {
+ input := []byte("a b c\nd e")
+ expected := []string{"a", "b", "c", "d", "e"}
+ tokens, err := allTokens("TestAllTokens", input)
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if len(tokens) != len(expected) {
+ t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens))
+ }
+
+ for i, val := range expected {
+ if tokens[i].Text != val {
+ t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text)
+ }
+ }
+}
+
+func TestParseOneAndImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ for i, test := range []struct {
+ input string
+ shouldErr bool
+ keys []string
+ numTokens []int // number of tokens to expect in each segment
+ }{
+ {`localhost`, false, []string{
+ "localhost",
+ }, []int{}},
+
+ {`localhost
+ dir1`, false, []string{
+ "localhost",
+ }, []int{1}},
+
+ {
+ `localhost:1234
+ dir1 foo bar`, false, []string{
+ "localhost:1234",
+ }, []int{3},
+ },
+
+ {`localhost {
+ dir1
+ }`, false, []string{
+ "localhost",
+ }, []int{1}},
+
+ {`localhost:1234 {
+ dir1 foo bar
+ dir2
+ }`, false, []string{
+ "localhost:1234",
+ }, []int{3, 1}},
+
+ {`http://localhost https://localhost
+ dir1 foo bar`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost https://localhost {
+ dir1 foo bar
+ }`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost, https://localhost {
+ dir1 foo bar
+ }`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost, {
+ }`, true, []string{
+ "http://localhost",
+ }, []int{}},
+
+ {`host1:80, http://host2.com
+ dir1 foo bar
+ dir2 baz`, false, []string{
+ "host1:80",
+ "http://host2.com",
+ }, []int{3, 2}},
+
+ {`http://host1.com,
+ http://host2.com,
+ https://host3.com`, false, []string{
+ "http://host1.com",
+ "http://host2.com",
+ "https://host3.com",
+ }, []int{}},
+
+ {`http://host1.com:1234, https://host2.com
+ dir1 foo {
+ bar baz
+ }
+ dir2`, false, []string{
+ "http://host1.com:1234",
+ "https://host2.com",
+ }, []int{6, 1}},
+
+ {`127.0.0.1
+ dir1 {
+ bar baz
+ }
+ dir2 {
+ foo bar
+ }`, false, []string{
+ "127.0.0.1",
+ }, []int{5, 5}},
+
+ {`localhost
+ dir1 {
+ foo`, true, []string{
+ "localhost",
+ }, []int{3}},
+
+ {`localhost
+ dir1 {
+ }`, false, []string{
+ "localhost",
+ }, []int{3}},
+
+ {`localhost
+ dir1 {
+ } }`, true, []string{
+ "localhost",
+ }, []int{}},
+
+ {`localhost{
+ dir1
+ }`, true, []string{}, []int{}},
+
+ {`localhost
+ dir1 {
+ nested {
+ foo
+ }
+ }
+ dir2 foo bar`, false, []string{
+ "localhost",
+ }, []int{7, 3}},
+
+ {``, false, []string{}, []int{}},
+
+ {`localhost
+ dir1 arg1
+ import testdata/import_test1.txt`, false, []string{
+ "localhost",
+ }, []int{2, 3, 1}},
+
+ {`import testdata/import_test2.txt`, false, []string{
+ "host1",
+ }, []int{1, 2}},
+
+ {`import testdata/not_found.txt`, true, []string{}, []int{}},
+
+ // empty file should just log a warning, and result in no tokens
+ {`import testdata/empty.txt`, false, []string{}, []int{}},
+
+ {`import testdata/only_white_space.txt`, false, []string{}, []int{}},
+
+ // import path/to/dir/* should skip any files that start with a . when iterating over them.
+ {`localhost
+ dir1 arg1
+ import testdata/glob/*`, false, []string{
+ "localhost",
+ }, []int{2, 3, 1}},
+
+ // import path/to/dir/.* should continue to read all dotfiles in a dir.
+ {`import testdata/glob/.*`, false, []string{
+ "host1",
+ }, []int{1, 2}},
+
+ {`""`, false, []string{}, []int{}},
+
+ {``, false, []string{}, []int{}},
+
+ // Unexpected next token after '{' on same line
+ {`localhost
+ dir1 { a b }`, true, []string{"localhost"}, []int{}},
+
+ // Unexpected '{' on a new line
+ {`localhost
+ dir1
+ {
+ a b
+ }`, true, []string{"localhost"}, []int{}},
+
+ // Workaround with quotes
+ {`localhost
+ dir1 "{" a b "}"`, false, []string{"localhost"}, []int{5}},
+
+ // Unexpected '{}' at end of line
+ {`localhost
+ dir1 {}`, true, []string{"localhost"}, []int{}},
+ // Workaround with quotes
+ {`localhost
+ dir1 "{}"`, false, []string{"localhost"}, []int{2}},
+
+ // import with args
+ {`import testdata/import_args0.txt a`, false, []string{"a"}, []int{}},
+ {`import testdata/import_args1.txt a b`, false, []string{"a", "b"}, []int{}},
+ {`import testdata/import_args*.txt a b`, false, []string{"a"}, []int{2}},
+
+ // test cases found by fuzzing!
+ {`import }{$"`, true, []string{}, []int{}},
+ {`import /*/*.txt`, true, []string{}, []int{}},
+ {`import /???/?*?o`, true, []string{}, []int{}},
+ {`import /??`, true, []string{}, []int{}},
+ {`import /[a-z]`, true, []string{}, []int{}},
+ {`import {$}`, true, []string{}, []int{}},
+ {`import {%}`, true, []string{}, []int{}},
+ {`import {$$}`, true, []string{}, []int{}},
+ {`import {%%}`, true, []string{}, []int{}},
+ } {
+ result, err := testParseOne(test.input)
+
+ if test.shouldErr && err == nil {
+ t.Errorf("Test %d: Expected an error, but didn't get one", i)
+ }
+ if !test.shouldErr && err != nil {
+ t.Errorf("Test %d: Expected no error, but got: %v", i, err)
+ }
+
+ // t.Logf("%+v\n", result)
+ if len(result.Keys) != len(test.keys) {
+ t.Errorf("Test %d: Expected %d keys, got %d",
+ i, len(test.keys), len(result.Keys))
+ continue
+ }
+ for j, addr := range result.GetKeysText() {
+ if addr != test.keys[j] {
+ t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
+ i, j, test.keys[j], addr)
+ }
+ }
+
+ if len(result.Segments) != len(test.numTokens) {
+ t.Errorf("Test %d: Expected %d segments, had %d",
+ i, len(test.numTokens), len(result.Segments))
+ continue
+ }
+
+ for j, seg := range result.Segments {
+ if len(seg) != test.numTokens[j] {
+ t.Errorf("Test %d, segment %d: Expected %d tokens, counted %d",
+ i, j, test.numTokens[j], len(seg))
+ continue
+ }
+ }
+ }
+}
+
+func TestRecursiveImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ isExpected := func(got ServerBlock) bool {
+ textKeys := got.GetKeysText()
+ if len(textKeys) != 1 || textKeys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
+ return false
+ }
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
+ return false
+ }
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 2 {
+ t.Errorf("got unexpected tokens: %v", got.Segments)
+ return false
+ }
+ return true
+ }
+
+ recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test relative recursive import
+ err = os.WriteFile(recursiveFile1, []byte(
+ `localhost
+ dir1
+ import recursive_import_test2`), 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(recursiveFile1)
+
+ err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(recursiveFile2)
+
+ // import absolute path
+ result, err := testParseOne("import " + recursiveFile1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("absolute+relative import failed")
+ }
+
+ // import relative path
+ result, err = testParseOne("import testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("relative+relative import failed")
+ }
+
+ // test absolute recursive import
+ err = os.WriteFile(recursiveFile1, []byte(
+ `localhost
+ dir1
+ import `+recursiveFile2), 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import absolute path
+ result, err = testParseOne("import " + recursiveFile1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("absolute+absolute import failed")
+ }
+
+ // import relative path
+ result, err = testParseOne("import testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("relative+absolute import failed")
+ }
+}
+
+func TestDirectiveImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ isExpected := func(got ServerBlock) bool {
+ textKeys := got.GetKeysText()
+ if len(textKeys) != 1 || textKeys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", textKeys)
+ return false
+ }
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
+ return false
+ }
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 8 {
+ t.Errorf("got unexpected tokens: %v", got.Segments)
+ return false
+ }
+ return true
+ }
+
+ directiveFile, err := filepath.Abs("testdata/directive_import_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = os.WriteFile(directiveFile, []byte(`prop1 1
+ prop2 2`), 0o644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(directiveFile)
+
+ // import from existing file
+ result, err := testParseOne(`localhost
+ dir1
+ proxy {
+ import testdata/directive_import_test
+ transparent
+ }`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("directive import failed")
+ }
+
+ // import from nonexistent file
+ _, err = testParseOne(`localhost
+ dir1
+ proxy {
+ import testdata/nonexistent_file
+ transparent
+ }`)
+ if err == nil {
+ t.Fatal("expected error when importing a nonexistent file")
+ }
+}
+
+func TestParseAll(t *testing.T) {
+ for i, test := range []struct {
+ input string
+ shouldErr bool
+ keys [][]string // keys per server block, in order
+ }{
+ {`localhost`, false, [][]string{
+ {"localhost"},
+ }},
+
+ {`localhost:1234`, false, [][]string{
+ {"localhost:1234"},
+ }},
+
+ {`localhost:1234 {
+ }
+ localhost:2015 {
+ }`, false, [][]string{
+ {"localhost:1234"},
+ {"localhost:2015"},
+ }},
+
+ {`localhost:1234, http://host2`, false, [][]string{
+ {"localhost:1234", "http://host2"},
+ }},
+
+ {`foo.example.com , example.com`, false, [][]string{
+ {"foo.example.com", "example.com"},
+ }},
+
+ {`localhost:1234, http://host2,`, true, [][]string{}},
+
+ {`http://host1.com, http://host2.com {
+ }
+ https://host3.com, https://host4.com {
+ }`, false, [][]string{
+ {"http://host1.com", "http://host2.com"},
+ {"https://host3.com", "https://host4.com"},
+ }},
+
+ {`import testdata/import_glob*.txt`, false, [][]string{
+ {"glob0.host0"},
+ {"glob0.host1"},
+ {"glob1.host0"},
+ {"glob2.host0"},
+ }},
+
+ {`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches
+ {`import notfound/file.conf`, true, [][]string{}}, // but a specific file should
+
+ // recursive self-import
+ {`import testdata/import_recursive0.txt`, true, [][]string{}},
+ {`import testdata/import_recursive3.txt
+ import testdata/import_recursive1.txt`, true, [][]string{}},
+
+ // cyclic imports
+ {`(A) {
+ import A
+ }
+ :80
+ import A
+ `, true, [][]string{}},
+ {`(A) {
+ import B
+ }
+ (B) {
+ import A
+ }
+ :80
+ import A
+ `, true, [][]string{}},
+ } {
+ p := testParser(test.input)
+ blocks, err := p.parseAll()
+
+ if test.shouldErr && err == nil {
+ t.Errorf("Test %d: Expected an error, but didn't get one", i)
+ }
+ if !test.shouldErr && err != nil {
+ t.Errorf("Test %d: Expected no error, but got: %v", i, err)
+ }
+
+ if len(blocks) != len(test.keys) {
+ t.Errorf("Test %d: Expected %d server blocks, got %d",
+ i, len(test.keys), len(blocks))
+ continue
+ }
+ for j, block := range blocks {
+ if len(block.Keys) != len(test.keys[j]) {
+ t.Errorf("Test %d: Expected %d keys in block %d, got %d: %v",
+ i, len(test.keys[j]), j, len(block.Keys), block.Keys)
+ continue
+ }
+ for k, addr := range block.GetKeysText() {
+ if addr != test.keys[j][k] {
+ t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
+ i, j, k, test.keys[j][k], addr)
+ }
+ }
+ }
+ }
+}
+
+func TestEnvironmentReplacement(t *testing.T) {
+ os.Setenv("FOOBAR", "foobar")
+ os.Setenv("CHAINED", "$FOOBAR")
+
+ for i, test := range []struct {
+ input string
+ expect string
+ }{
+ {
+ input: "",
+ expect: "",
+ },
+ {
+ input: "foo",
+ expect: "foo",
+ },
+ {
+ input: "{$NOT_SET}",
+ expect: "",
+ },
+ {
+ input: "foo{$NOT_SET}bar",
+ expect: "foobar",
+ },
+ {
+ input: "{$FOOBAR}",
+ expect: "foobar",
+ },
+ {
+ input: "foo {$FOOBAR} bar",
+ expect: "foo foobar bar",
+ },
+ {
+ input: "foo{$FOOBAR}bar",
+ expect: "foofoobarbar",
+ },
+ {
+ input: "foo\n{$FOOBAR}\nbar",
+ expect: "foo\nfoobar\nbar",
+ },
+ {
+ input: "{$FOOBAR} {$FOOBAR}",
+ expect: "foobar foobar",
+ },
+ {
+ input: "{$FOOBAR}{$FOOBAR}",
+ expect: "foobarfoobar",
+ },
+ {
+ input: "{$CHAINED}",
+ expect: "$FOOBAR", // should not chain env expands
+ },
+ {
+ input: "{$FOO:default}",
+ expect: "default",
+ },
+ {
+ input: "foo{$BAR:bar}baz",
+ expect: "foobarbaz",
+ },
+ {
+ input: "foo{$BAR:$FOOBAR}baz",
+ expect: "foo$FOOBARbaz", // should not chain env expands
+ },
+ {
+ input: "{$FOOBAR",
+ expect: "{$FOOBAR",
+ },
+ {
+ input: "{$LONGER_NAME $FOOBAR}",
+ expect: "",
+ },
+ {
+ input: "{$}",
+ expect: "{$}",
+ },
+ {
+ input: "{$$}",
+ expect: "",
+ },
+ {
+ input: "{$",
+ expect: "{$",
+ },
+ {
+ input: "}{$",
+ expect: "}{$",
+ },
+ } {
+ actual := replaceEnvVars([]byte(test.input))
+ if !bytes.Equal(actual, []byte(test.expect)) {
+ t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual)
+ }
+ }
+}
+
+func TestImportReplacementInJSONWithBrace(t *testing.T) {
+ for i, test := range []struct {
+ args []string
+ input string
+ expect string
+ }{
+ {
+ args: []string{"123"},
+ input: "{args[0]}",
+ expect: "123",
+ },
+ {
+ args: []string{"123"},
+ input: `{"key":"{args[0]}"}`,
+ expect: `{"key":"123"}`,
+ },
+ {
+ args: []string{"123", "123"},
+ input: `{"key":[{args[0]},{args[1]}]}`,
+ expect: `{"key":[123,123]}`,
+ },
+ } {
+ repl := makeArgsReplacer(test.args)
+ actual := repl.ReplaceKnown(test.input, "")
+ if actual != test.expect {
+ t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual)
+ }
+ }
+}
+
+func TestSnippets(t *testing.T) {
+ p := testParser(`
+ (common) {
+ gzip foo
+ errors stderr
+ }
+ http://example.com {
+ import common
+ }
+ `)
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(blocks) != 1 {
+ t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
+ }
+ if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
+ t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
+ }
+ if len(blocks[0].Segments) != 2 {
+ t.Fatalf("Server block should have tokens from import, got: %+v", blocks[0])
+ }
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+ if actual, expected := blocks[0].Segments[1][1].Text, "stderr"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+}
+
+func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) {
+ file, err := os.CreateTemp("", t.Name())
+ if err != nil {
+ panic(err) // get a stack trace so we know where this was called from.
+ }
+ if _, err := file.WriteString(str); err != nil {
+ panic(err)
+ }
+ if err := file.Close(); err != nil {
+ panic(err)
+ }
+ return file.Name()
+}
+
+func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
+ fileName := writeStringToTempFileOrDie(t, `
+ http://example.com {
+ # This isn't an import directive, it's just an arg with value 'import'
+ basic_auth / import password
+ }
+ `)
+ // Parse the root file that imports the other one.
+ p := testParser(`import ` + fileName)
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ auth := blocks[0].Segments[0]
+ line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
+ if line != "basic_auth / import password" {
+ // Previously, it would be changed to:
+ // basic_auth / import /path/to/test/dir/password
+ // referencing a file that (probably) doesn't exist and changing the
+ // password!
+ t.Errorf("Expected basic_auth tokens to be 'basic_auth / import password' but got %#q", line)
+ }
+}
+
+func TestSnippetAcrossMultipleFiles(t *testing.T) {
+ // Make the derived Caddyfile that expects (common) to be defined.
+ fileName := writeStringToTempFileOrDie(t, `
+ http://example.com {
+ import common
+ }
+ `)
+
+ // Parse the root file that defines (common) and then imports the other one.
+ p := testParser(`
+ (common) {
+ gzip foo
+ }
+ import ` + fileName + `
+ `)
+
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(blocks) != 1 {
+ t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
+ }
+ if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
+ t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
+ }
+ if len(blocks[0].Segments) != 1 {
+ t.Fatalf("Server block should have tokens from import")
+ }
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+}
+
+func TestRejectsGlobalMatcher(t *testing.T) {
+ p := testParser(`
+ @rejected path /foo
+
+ (common) {
+ gzip foo
+ errors stderr
+ }
+
+ http://example.com {
+ import common
+ }
+ `)
+ _, err := p.parseAll()
+ if err == nil {
+ t.Fatal("Expected an error, but got nil")
+ }
+ expected := "request matchers may not be defined globally, they must be in a site block; found @rejected, at Testfile:2"
+ if err.Error() != expected {
+ t.Errorf("Expected error to be '%s' but got '%v'", expected, err)
+ }
+}
+
+func testParser(input string) parser {
+ return parser{Dispenser: NewTestDispenser(input)}
+}
diff --git a/caddyhttp/rewrite/testdata/testdir/empty b/caddyconfig/caddyfile/testdata/empty.txt
similarity index 100%
rename from caddyhttp/rewrite/testdata/testdir/empty
rename to caddyconfig/caddyfile/testdata/empty.txt
diff --git a/caddyconfig/caddyfile/testdata/glob/.dotfile.txt b/caddyconfig/caddyfile/testdata/glob/.dotfile.txt
new file mode 100644
index 00000000000..faab100c604
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/glob/.dotfile.txt
@@ -0,0 +1,4 @@
+host1 {
+ dir1
+ dir2 arg1
+}
diff --git a/caddyfile/testdata/import_test1.txt b/caddyconfig/caddyfile/testdata/glob/import_test1.txt
similarity index 100%
rename from caddyfile/testdata/import_test1.txt
rename to caddyconfig/caddyfile/testdata/glob/import_test1.txt
diff --git a/caddyconfig/caddyfile/testdata/import_args0.txt b/caddyconfig/caddyfile/testdata/import_args0.txt
new file mode 100644
index 00000000000..add211e378d
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_args0.txt
@@ -0,0 +1 @@
+{args[0]}
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_args1.txt b/caddyconfig/caddyfile/testdata/import_args1.txt
new file mode 100644
index 00000000000..422692a2c23
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_args1.txt
@@ -0,0 +1 @@
+{args[0]} {args[1]}
\ No newline at end of file
diff --git a/caddyfile/testdata/import_glob0.txt b/caddyconfig/caddyfile/testdata/import_glob0.txt
similarity index 100%
rename from caddyfile/testdata/import_glob0.txt
rename to caddyconfig/caddyfile/testdata/import_glob0.txt
diff --git a/caddyfile/testdata/import_glob1.txt b/caddyconfig/caddyfile/testdata/import_glob1.txt
similarity index 100%
rename from caddyfile/testdata/import_glob1.txt
rename to caddyconfig/caddyfile/testdata/import_glob1.txt
diff --git a/caddyfile/testdata/import_glob2.txt b/caddyconfig/caddyfile/testdata/import_glob2.txt
similarity index 100%
rename from caddyfile/testdata/import_glob2.txt
rename to caddyconfig/caddyfile/testdata/import_glob2.txt
diff --git a/caddyconfig/caddyfile/testdata/import_recursive0.txt b/caddyconfig/caddyfile/testdata/import_recursive0.txt
new file mode 100644
index 00000000000..4d827b33ecb
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive0.txt
@@ -0,0 +1 @@
+import import_recursive0.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive1.txt b/caddyconfig/caddyfile/testdata/import_recursive1.txt
new file mode 100644
index 00000000000..9b6102ed75c
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive1.txt
@@ -0,0 +1 @@
+import import_recursive2.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive2.txt b/caddyconfig/caddyfile/testdata/import_recursive2.txt
new file mode 100644
index 00000000000..5553dea3876
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive2.txt
@@ -0,0 +1 @@
+import import_recursive3.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_recursive3.txt b/caddyconfig/caddyfile/testdata/import_recursive3.txt
new file mode 100644
index 00000000000..fcf0237f6c0
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_recursive3.txt
@@ -0,0 +1 @@
+import import_recursive1.txt
\ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_test1.txt b/caddyconfig/caddyfile/testdata/import_test1.txt
new file mode 100644
index 00000000000..dac7b29be09
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_test1.txt
@@ -0,0 +1,2 @@
+dir2 arg1 arg2
+dir3
\ No newline at end of file
diff --git a/caddyfile/testdata/import_test2.txt b/caddyconfig/caddyfile/testdata/import_test2.txt
similarity index 100%
rename from caddyfile/testdata/import_test2.txt
rename to caddyconfig/caddyfile/testdata/import_test2.txt
diff --git a/caddyconfig/caddyfile/testdata/only_white_space.txt b/caddyconfig/caddyfile/testdata/only_white_space.txt
new file mode 100644
index 00000000000..705327cd431
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/only_white_space.txt
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/caddyconfig/configadapters.go b/caddyconfig/configadapters.go
new file mode 100644
index 00000000000..0ca3c3af13f
--- /dev/null
+++ b/caddyconfig/configadapters.go
@@ -0,0 +1,138 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyconfig
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// Adapter is a type which can adapt a configuration to Caddy JSON.
+// It returns the results and any warnings, or an error.
+type Adapter interface {
+ Adapt(body []byte, options map[string]any) ([]byte, []Warning, error)
+}
+
+// Warning represents a warning or notice related to conversion.
+type Warning struct {
+ File string `json:"file,omitempty"`
+ Line int `json:"line,omitempty"`
+ Directive string `json:"directive,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (w Warning) String() string {
+ var directive string
+ if w.Directive != "" {
+ directive = fmt.Sprintf(" (%s)", w.Directive)
+ }
+ return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message)
+}
+
+// JSON encodes val as JSON, returning it as a json.RawMessage. Any
+// marshaling errors (which are highly unlikely with correct code)
+// are converted to warnings. This is convenient when filling config
+// structs that require a json.RawMessage, without having to worry
+// about errors.
+func JSON(val any, warnings *[]Warning) json.RawMessage {
+ b, err := json.Marshal(val)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+ return b
+}
+
+// JSONModuleObject is like JSON(), except it marshals val into a JSON object
+// with an added key named fieldName with the value fieldVal. This is useful
+// for encoding module values where the module name has to be described within
+// the object by a certain key; for example, `"handler": "file_server"` for a
+// file server HTTP handler (fieldName="handler" and fieldVal="file_server").
+// The val parameter must encode into a map[string]any (i.e. it must be
+// a struct or map). Any errors are converted into warnings.
+func JSONModuleObject(val any, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
+ // encode to a JSON object first
+ enc, err := json.Marshal(val)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ // then decode the object
+ var tmp map[string]any
+ err = json.Unmarshal(enc, &tmp)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ // so we can easily add the module's field with its appointed value
+ tmp[fieldName] = fieldVal
+
+ // then re-marshal as JSON
+ result, err := json.Marshal(tmp)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ return result
+}
+
+// RegisterAdapter registers a config adapter with the given name.
+// This should usually be done at init-time. It panics if the
+// adapter cannot be registered successfully.
+func RegisterAdapter(name string, adapter Adapter) {
+ if _, ok := configAdapters[name]; ok {
+ panic(fmt.Errorf("%s: already registered", name))
+ }
+ configAdapters[name] = adapter
+ caddy.RegisterModule(adapterModule{name, adapter})
+}
+
+// GetAdapter returns the adapter with the given name,
+// or nil if one with that name is not registered.
+func GetAdapter(name string) Adapter {
+ return configAdapters[name]
+}
+
+// adapterModule is a wrapper type that can turn any config
+// adapter into a Caddy module, which has the benefit of being
+// counted with other modules, even though they do not
+// technically extend the Caddy configuration structure.
+// See caddyserver/caddy#3132.
+type adapterModule struct {
+ name string
+ Adapter
+}
+
+func (am adapterModule) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: caddy.ModuleID("caddy.adapters." + am.name),
+ New: func() caddy.Module { return am },
+ }
+}
+
+var configAdapters = make(map[string]Adapter)
diff --git a/caddyconfig/httpcaddyfile/addresses.go b/caddyconfig/httpcaddyfile/addresses.go
new file mode 100644
index 00000000000..1121776d98f
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/addresses.go
@@ -0,0 +1,501 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/caddyserver/certmagic"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// mapAddressToProtocolToServerBlocks returns a map of listener address to list of server
+// blocks that will be served on that address. To do this, each server block is
+// expanded so that each one is considered individually, although keys of a
+// server block that share the same address stay grouped together so the config
+// isn't repeated unnecessarily. For example, this Caddyfile:
+//
+// example.com {
+// bind 127.0.0.1
+// }
+// www.example.com, example.net/path, localhost:9999 {
+// bind 127.0.0.1 1.2.3.4
+// }
+//
+// has two server blocks to start with. But expressed in this Caddyfile are
+// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
+// and 127.0.0.1:9999. This is because the bind directive is applied to each
+// key of its server block (specifying the host part), and each key may have
+// a different port. And we definitely need to be sure that a site which is
+// bound to be served on a specific interface is not served on others just
+// because that is more convenient: it would be a potential security risk
+// if the difference between interfaces means private vs. public.
+//
+// So what this function does for the example above is iterate each server
+// block, and for each server block, iterate its keys. For the first, it
+// finds one key (example.com) and determines its listener address
+// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
+// the listener address to the map value returned by this function, with
+// the first server block as one of its associations.
+//
+// It then iterates each key on the second server block and associates them
+// with one or more listener addresses. Indeed, each key in this block has
+// two listener addresses because of the 'bind' directive. Once we know
+// which addresses serve which keys, we can create a new server block for
+// each address containing the contents of the server block and only those
+// specific keys of the server block which use that address.
+//
+// It is possible and even likely that some keys in the returned map have
+// the exact same list of server blocks (i.e. they are identical). This
+// happens when multiple hosts are declared with a 'bind' directive and
+// the resulting listener addresses are not shared by any other server
+// block (or the other server blocks are exactly identical in their token
+// contents). This happens with our example above because 1.2.3.4:443
+// and 1.2.3.4:9999 are used exclusively with the second server block. This
+// repetition may be undesirable, so call consolidateAddrMappings() to map
+// multiple addresses to the same lists of server blocks (a many:many mapping).
+// (Doing this is essentially a map-reduce technique.)
+func (st *ServerType) mapAddressToProtocolToServerBlocks(originalServerBlocks []serverBlock,
+ options map[string]any,
+) (map[string]map[string][]serverBlock, error) {
+ addrToProtocolToServerBlocks := map[string]map[string][]serverBlock{}
+
+ type keyWithParsedKey struct {
+ key caddyfile.Token
+ parsedKey Address
+ }
+
+ for i, sblock := range originalServerBlocks {
+ // within a server block, we need to map all the listener addresses
+ // implied by the server block to the keys of the server block which
+ // will be served by them; this has the effect of treating each
+ // key of a server block as its own, but without having to repeat its
+ // contents in cases where multiple keys really can be served together
+ addrToProtocolToKeyWithParsedKeys := map[string]map[string][]keyWithParsedKey{}
+ for j, key := range sblock.block.Keys {
+ parsedKey, err := ParseAddress(key.Text)
+ if err != nil {
+ return nil, fmt.Errorf("parsing key: %v", err)
+ }
+ parsedKey = parsedKey.Normalize()
+
+ // a key can have multiple listener addresses if there are multiple
+ // arguments to the 'bind' directive (although they will all have
+ // the same port, since the port is defined by the key or is implicit
+ // through automatic HTTPS)
+ listeners, err := st.listenersForServerBlockAddress(sblock, parsedKey, options)
+ if err != nil {
+ return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
+ }
+
+ // associate this key with its protocols and each listener address served with them
+ kwpk := keyWithParsedKey{key, parsedKey}
+ for addr, protocols := range listeners {
+ protocolToKeyWithParsedKeys, ok := addrToProtocolToKeyWithParsedKeys[addr]
+ if !ok {
+ protocolToKeyWithParsedKeys = map[string][]keyWithParsedKey{}
+ addrToProtocolToKeyWithParsedKeys[addr] = protocolToKeyWithParsedKeys
+ }
+
+ // an empty protocol indicates the default, a nil or empty value in the ListenProtocols array
+ if len(protocols) == 0 {
+ protocols[""] = struct{}{}
+ }
+ for prot := range protocols {
+ protocolToKeyWithParsedKeys[prot] = append(
+ protocolToKeyWithParsedKeys[prot],
+ kwpk)
+ }
+ }
+ }
+
+ // make a slice of the map keys so we can iterate in sorted order
+ addrs := make([]string, 0, len(addrToProtocolToKeyWithParsedKeys))
+ for addr := range addrToProtocolToKeyWithParsedKeys {
+ addrs = append(addrs, addr)
+ }
+ sort.Strings(addrs)
+
+ // now that we know which addresses serve which keys of this
+ // server block, we iterate that mapping and create a list of
+ // new server blocks for each address where the keys of the
+ // server block are only the ones which use the address; but
+ // the contents (tokens) are of course the same
+ for _, addr := range addrs {
+ protocolToKeyWithParsedKeys := addrToProtocolToKeyWithParsedKeys[addr]
+
+ prots := make([]string, 0, len(protocolToKeyWithParsedKeys))
+ for prot := range protocolToKeyWithParsedKeys {
+ prots = append(prots, prot)
+ }
+ sort.Strings(prots)
+
+ protocolToServerBlocks, ok := addrToProtocolToServerBlocks[addr]
+ if !ok {
+ protocolToServerBlocks = map[string][]serverBlock{}
+ addrToProtocolToServerBlocks[addr] = protocolToServerBlocks
+ }
+
+ for _, prot := range prots {
+ keyWithParsedKeys := protocolToKeyWithParsedKeys[prot]
+
+ keys := make([]caddyfile.Token, len(keyWithParsedKeys))
+ parsedKeys := make([]Address, len(keyWithParsedKeys))
+
+ for k, keyWithParsedKey := range keyWithParsedKeys {
+ keys[k] = keyWithParsedKey.key
+ parsedKeys[k] = keyWithParsedKey.parsedKey
+ }
+
+ protocolToServerBlocks[prot] = append(protocolToServerBlocks[prot], serverBlock{
+ block: caddyfile.ServerBlock{
+ Keys: keys,
+ Segments: sblock.block.Segments,
+ },
+ pile: sblock.pile,
+ parsedKeys: parsedKeys,
+ })
+ }
+ }
+ }
+
+ return addrToProtocolToServerBlocks, nil
+}
+
+// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
+// single listener addresses to protocols to lists of server blocks. Since multiple addresses
+// may serve multiple protocols to identical sites (server block contents), this function turns
+// a 1:many mapping into a many:many mapping. Server block contents (tokens) must be
+// exactly identical so that reflect.DeepEqual returns true in order for the addresses to be combined.
+// Identical entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
+// association from multiple addresses to multiple server blocks; i.e. each element of
+// the returned slice) becomes a server definition in the output JSON.
+func (st *ServerType) consolidateAddrMappings(addrToProtocolToServerBlocks map[string]map[string][]serverBlock) []sbAddrAssociation {
+ sbaddrs := make([]sbAddrAssociation, 0, len(addrToProtocolToServerBlocks))
+
+ addrs := make([]string, 0, len(addrToProtocolToServerBlocks))
+ for addr := range addrToProtocolToServerBlocks {
+ addrs = append(addrs, addr)
+ }
+ sort.Strings(addrs)
+
+ for _, addr := range addrs {
+ protocolToServerBlocks := addrToProtocolToServerBlocks[addr]
+
+ prots := make([]string, 0, len(protocolToServerBlocks))
+ for prot := range protocolToServerBlocks {
+ prots = append(prots, prot)
+ }
+ sort.Strings(prots)
+
+ for _, prot := range prots {
+ serverBlocks := protocolToServerBlocks[prot]
+
+ // now find other addresses that map to identical
+ // server blocks and add them to our map of listener
+ // addresses and protocols, while removing them from
+ // the original map
+ listeners := map[string]map[string]struct{}{}
+
+ for otherAddr, otherProtocolToServerBlocks := range addrToProtocolToServerBlocks {
+ for otherProt, otherServerBlocks := range otherProtocolToServerBlocks {
+ if addr == otherAddr && prot == otherProt || reflect.DeepEqual(serverBlocks, otherServerBlocks) {
+ listener, ok := listeners[otherAddr]
+ if !ok {
+ listener = map[string]struct{}{}
+ listeners[otherAddr] = listener
+ }
+ listener[otherProt] = struct{}{}
+ delete(otherProtocolToServerBlocks, otherProt)
+ }
+ }
+ }
+
+ addresses := make([]string, 0, len(listeners))
+ for lnAddr := range listeners {
+ addresses = append(addresses, lnAddr)
+ }
+ sort.Strings(addresses)
+
+ addressesWithProtocols := make([]addressWithProtocols, 0, len(listeners))
+
+ for _, lnAddr := range addresses {
+ lnProts := listeners[lnAddr]
+ prots := make([]string, 0, len(lnProts))
+ for prot := range lnProts {
+ prots = append(prots, prot)
+ }
+ sort.Strings(prots)
+
+ addressesWithProtocols = append(addressesWithProtocols, addressWithProtocols{
+ address: lnAddr,
+ protocols: prots,
+ })
+ }
+
+ sbaddrs = append(sbaddrs, sbAddrAssociation{
+ addressesWithProtocols: addressesWithProtocols,
+ serverBlocks: serverBlocks,
+ })
+ }
+ }
+
+ return sbaddrs
+}
+
+// listenersForServerBlockAddress essentially converts the Caddyfile site addresses to a map from
+// Caddy listener addresses and the protocols to serve them with to the parsed address for each server block.
+func (st *ServerType) listenersForServerBlockAddress(sblock serverBlock, addr Address,
+ options map[string]any,
+) (map[string]map[string]struct{}, error) {
+ switch addr.Scheme {
+ case "wss":
+ return nil, fmt.Errorf("the scheme wss:// is only supported in browsers; use https:// instead")
+ case "ws":
+ return nil, fmt.Errorf("the scheme ws:// is only supported in browsers; use http:// instead")
+ case "https", "http", "":
+ // Do nothing or handle the valid schemes
+ default:
+ return nil, fmt.Errorf("unsupported URL scheme %s://", addr.Scheme)
+ }
+
+ // figure out the HTTP and HTTPS ports; either
+ // use defaults, or override with user config
+ httpPort, httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPPort), strconv.Itoa(caddyhttp.DefaultHTTPSPort)
+ if hport, ok := options["http_port"]; ok {
+ httpPort = strconv.Itoa(hport.(int))
+ }
+ if hsport, ok := options["https_port"]; ok {
+ httpsPort = strconv.Itoa(hsport.(int))
+ }
+
+ // default port is the HTTPS port
+ lnPort := httpsPort
+ if addr.Port != "" {
+ // port explicitly defined
+ lnPort = addr.Port
+ } else if addr.Scheme == "http" {
+ // port inferred from scheme
+ lnPort = httpPort
+ }
+
+ // error if scheme and port combination violate convention
+ if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) {
+ return nil, fmt.Errorf("[%s] scheme and port violate convention", addr.String())
+ }
+
+ // the bind directive specifies hosts (and potentially network), and the protocols to serve them with, but is optional
+ lnCfgVals := make([]addressesWithProtocols, 0, len(sblock.pile["bind"]))
+ for _, cfgVal := range sblock.pile["bind"] {
+ if val, ok := cfgVal.Value.(addressesWithProtocols); ok {
+ lnCfgVals = append(lnCfgVals, val)
+ }
+ }
+ if len(lnCfgVals) == 0 {
+ if defaultBindValues, ok := options["default_bind"].([]ConfigValue); ok {
+ for _, defaultBindValue := range defaultBindValues {
+ lnCfgVals = append(lnCfgVals, defaultBindValue.Value.(addressesWithProtocols))
+ }
+ } else {
+ lnCfgVals = []addressesWithProtocols{{
+ addresses: []string{""},
+ protocols: nil,
+ }}
+ }
+ }
+
+ // use a map to prevent duplication
+ listeners := map[string]map[string]struct{}{}
+ for _, lnCfgVal := range lnCfgVals {
+ for _, lnAddr := range lnCfgVal.addresses {
+ lnNetw, lnHost, _, err := caddy.SplitNetworkAddress(lnAddr)
+ if err != nil {
+ return nil, fmt.Errorf("splitting listener address: %v", err)
+ }
+ networkAddr, err := caddy.ParseNetworkAddress(caddy.JoinNetworkAddress(lnNetw, lnHost, lnPort))
+ if err != nil {
+ return nil, fmt.Errorf("parsing network address: %v", err)
+ }
+ if _, ok := listeners[addr.String()]; !ok {
+ listeners[networkAddr.String()] = map[string]struct{}{}
+ }
+ for _, protocol := range lnCfgVal.protocols {
+ listeners[networkAddr.String()][protocol] = struct{}{}
+ }
+ }
+ }
+
+ return listeners, nil
+}
+
+// addressesWithProtocols associates a list of listen addresses
+// with a list of protocols to serve them with
+type addressesWithProtocols struct {
+ addresses []string
+ protocols []string
+}
+
+// Address represents a site address. It contains
+// the original input value, and the component
+// parts of an address. The component parts may be
+// updated to the correct values as setup proceeds,
+// but the original value should never be changed.
+//
+// The Host field must be in a normalized form.
+type Address struct {
+ Original, Scheme, Host, Port, Path string
+}
+
+// ParseAddress parses an address string into a structured format with separate
+// scheme, host, port, and path portions, as well as the original input string.
+func ParseAddress(str string) (Address, error) {
+ const maxLen = 4096
+ if len(str) > maxLen {
+ str = str[:maxLen]
+ }
+ remaining := strings.TrimSpace(str)
+ a := Address{Original: remaining}
+
+ // extract scheme
+ splitScheme := strings.SplitN(remaining, "://", 2)
+ switch len(splitScheme) {
+ case 0:
+ return a, nil
+ case 1:
+ remaining = splitScheme[0]
+ case 2:
+ a.Scheme = splitScheme[0]
+ remaining = splitScheme[1]
+ }
+
+ // extract host and port
+ hostSplit := strings.SplitN(remaining, "/", 2)
+ if len(hostSplit) > 0 {
+ host, port, err := net.SplitHostPort(hostSplit[0])
+ if err != nil {
+ host, port, err = net.SplitHostPort(hostSplit[0] + ":")
+ if err != nil {
+ host = hostSplit[0]
+ }
+ }
+ a.Host = host
+ a.Port = port
+ }
+ if len(hostSplit) == 2 {
+ // all that remains is the path
+ a.Path = "/" + hostSplit[1]
+ }
+
+ // make sure port is valid
+ if a.Port != "" {
+ if portNum, err := strconv.Atoi(a.Port); err != nil {
+ return Address{}, fmt.Errorf("invalid port '%s': %v", a.Port, err)
+ } else if portNum < 0 || portNum > 65535 {
+ return Address{}, fmt.Errorf("port %d is out of range", portNum)
+ }
+ }
+
+ return a, nil
+}
+
+// String returns a human-readable form of a. It will
+// be a cleaned-up and filled-out URL string.
+func (a Address) String() string {
+ if a.Host == "" && a.Port == "" {
+ return ""
+ }
+ scheme := a.Scheme
+ if scheme == "" {
+ if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
+ scheme = "https"
+ } else {
+ scheme = "http"
+ }
+ }
+ s := scheme
+ if s != "" {
+ s += "://"
+ }
+ if a.Port != "" &&
+ ((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
+ (scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
+ s += net.JoinHostPort(a.Host, a.Port)
+ } else {
+ s += a.Host
+ }
+ if a.Path != "" {
+ s += a.Path
+ }
+ return s
+}
+
+// Normalize returns a normalized version of a.
+func (a Address) Normalize() Address {
+ path := a.Path
+
+ // ensure host is normalized if it's an IP address
+ host := strings.TrimSpace(a.Host)
+ if ip, err := netip.ParseAddr(host); err == nil {
+ if ip.Is6() && !ip.Is4() && !ip.Is4In6() {
+ host = ip.String()
+ }
+ }
+
+ return Address{
+ Original: a.Original,
+ Scheme: lowerExceptPlaceholders(a.Scheme),
+ Host: lowerExceptPlaceholders(host),
+ Port: a.Port,
+ Path: path,
+ }
+}
+
+// lowerExceptPlaceholders lowercases s except within
+// placeholders (substrings in non-escaped '{ }' spans).
+// See https://github.com/caddyserver/caddy/issues/3264
+func lowerExceptPlaceholders(s string) string {
+ var sb strings.Builder
+ var escaped, inPlaceholder bool
+ for _, ch := range s {
+ if ch == '\\' && !escaped {
+ escaped = true
+ sb.WriteRune(ch)
+ continue
+ }
+ if ch == '{' && !escaped {
+ inPlaceholder = true
+ }
+ if ch == '}' && inPlaceholder && !escaped {
+ inPlaceholder = false
+ }
+ if inPlaceholder {
+ sb.WriteRune(ch)
+ } else {
+ sb.WriteRune(unicode.ToLower(ch))
+ }
+ escaped = false
+ }
+ return sb.String()
+}
diff --git a/caddyconfig/httpcaddyfile/addresses_fuzz.go b/caddyconfig/httpcaddyfile/addresses_fuzz.go
new file mode 100644
index 00000000000..364ff971b11
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/addresses_fuzz.go
@@ -0,0 +1,28 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+
+package httpcaddyfile
+
+func FuzzParseAddress(data []byte) int {
+ addr, err := ParseAddress(string(data))
+ if err != nil {
+ if addr == (Address{}) {
+ return 1
+ }
+ return 0
+ }
+ return 1
+}
diff --git a/caddyconfig/httpcaddyfile/addresses_test.go b/caddyconfig/httpcaddyfile/addresses_test.go
new file mode 100644
index 00000000000..232460d0ffb
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/addresses_test.go
@@ -0,0 +1,253 @@
+package httpcaddyfile
+
+import (
+ "testing"
+)
+
+func TestParseAddress(t *testing.T) {
+ for i, test := range []struct {
+ input string
+ scheme, host, port, path string
+ shouldErr bool
+ }{
+ {``, "", "", "", "", false},
+ {`localhost`, "", "localhost", "", "", false},
+ {`localhost:1234`, "", "localhost", "1234", "", false},
+ {`localhost:`, "", "localhost", "", "", false},
+ {`0.0.0.0`, "", "0.0.0.0", "", "", false},
+ {`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false},
+ {`:1234`, "", "", "1234", "", false},
+ {`[::1]`, "", "::1", "", "", false},
+ {`[::1]:1234`, "", "::1", "1234", "", false},
+ {`:`, "", "", "", "", false},
+ {`:http`, "", "", "", "", true},
+ {`:https`, "", "", "", "", true},
+ {`localhost:http`, "", "", "", "", true}, // using service name in port is verboten, as of Go 1.12.8
+ {`localhost:https`, "", "", "", "", true},
+ {`http://localhost:https`, "", "", "", "", true}, // conflict
+ {`http://localhost:http`, "", "", "", "", true}, // repeated scheme
+ {`host:https/path`, "", "", "", "", true},
+ {`http://localhost:443`, "http", "localhost", "443", "", false}, // NOTE: not conventional
+ {`https://localhost:80`, "https", "localhost", "80", "", false}, // NOTE: not conventional
+ {`http://localhost`, "http", "localhost", "", "", false},
+ {`https://localhost`, "https", "localhost", "", "", false},
+ {`http://{env.APP_DOMAIN}`, "http", "{env.APP_DOMAIN}", "", "", false},
+ {`{env.APP_DOMAIN}:80`, "", "{env.APP_DOMAIN}", "80", "", false},
+ {`{env.APP_DOMAIN}/path`, "", "{env.APP_DOMAIN}", "", "/path", false},
+ {`example.com/{env.APP_PATH}`, "", "example.com", "", "/{env.APP_PATH}", false},
+ {`http://127.0.0.1`, "http", "127.0.0.1", "", "", false},
+ {`https://127.0.0.1`, "https", "127.0.0.1", "", "", false},
+ {`http://[::1]`, "http", "::1", "", "", false},
+ {`http://localhost:1234`, "http", "localhost", "1234", "", false},
+ {`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false},
+ {`http://[::1]:1234`, "http", "::1", "1234", "", false},
+ {``, "", "", "", "", false},
+ {`::1`, "", "::1", "", "", false},
+ {`localhost::`, "", "localhost::", "", "", false},
+ {`#$%@`, "", "#$%@", "", "", false}, // don't want to presume what the hostname could be
+ {`host/path`, "", "host", "", "/path", false},
+ {`http://host/`, "http", "host", "", "/", false},
+ {`//asdf`, "", "", "", "//asdf", false},
+ {`:1234/asdf`, "", "", "1234", "/asdf", false},
+ {`http://host/path`, "http", "host", "", "/path", false},
+ {`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
+ {`host:80/path`, "", "host", "80", "/path", false},
+ {`/path`, "", "", "", "/path", false},
+ } {
+ actual, err := ParseAddress(test.input)
+
+ if err != nil && !test.shouldErr {
+ t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
+ }
+ if err == nil && test.shouldErr {
+ t.Errorf("Test %d (%s): Expected error, but had none (%#v)", i, test.input, actual)
+ }
+
+ if !test.shouldErr && actual.Original != test.input {
+ t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original)
+ }
+ if actual.Scheme != test.scheme {
+ t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme)
+ }
+ if actual.Host != test.host {
+ t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host)
+ }
+ if actual.Port != test.port {
+ t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port)
+ }
+ if actual.Path != test.path {
+ t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path)
+ }
+ }
+}
+
+func TestAddressString(t *testing.T) {
+ for i, test := range []struct {
+ addr Address
+ expected string
+ }{
+ {Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"},
+ {Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"},
+ {Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"},
+ {Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"},
+ {Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"},
+ {Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"},
+ {Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"},
+ {Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"},
+ {Address{Scheme: "", Host: "", Port: "", Path: ""}, ""},
+ } {
+ actual := test.addr.String()
+ if actual != test.expected {
+ t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
+ }
+ }
+}
+
+func TestKeyNormalization(t *testing.T) {
+ testCases := []struct {
+ input string
+ expect Address
+ }{
+ {
+ input: "example.com",
+ expect: Address{
+ Host: "example.com",
+ },
+ },
+ {
+ input: "http://host:1234/path",
+ expect: Address{
+ Scheme: "http",
+ Host: "host",
+ Port: "1234",
+ Path: "/path",
+ },
+ },
+ {
+ input: "HTTP://A/ABCDEF",
+ expect: Address{
+ Scheme: "http",
+ Host: "a",
+ Path: "/ABCDEF",
+ },
+ },
+ {
+ input: "A/ABCDEF",
+ expect: Address{
+ Host: "a",
+ Path: "/ABCDEF",
+ },
+ },
+ {
+ input: "A:2015/Path",
+ expect: Address{
+ Host: "a",
+ Port: "2015",
+ Path: "/Path",
+ },
+ },
+ {
+ input: "sub.{env.MY_DOMAIN}",
+ expect: Address{
+ Host: "sub.{env.MY_DOMAIN}",
+ },
+ },
+ {
+ input: "sub.ExAmPle",
+ expect: Address{
+ Host: "sub.example",
+ },
+ },
+ {
+ input: "sub.\\{env.MY_DOMAIN\\}",
+ expect: Address{
+ Host: "sub.\\{env.my_domain\\}",
+ },
+ },
+ {
+ input: "sub.{env.MY_DOMAIN}.com",
+ expect: Address{
+ Host: "sub.{env.MY_DOMAIN}.com",
+ },
+ },
+ {
+ input: ":80",
+ expect: Address{
+ Port: "80",
+ },
+ },
+ {
+ input: ":443",
+ expect: Address{
+ Port: "443",
+ },
+ },
+ {
+ input: ":1234",
+ expect: Address{
+ Port: "1234",
+ },
+ },
+ {
+ input: "",
+ expect: Address{},
+ },
+ {
+ input: ":",
+ expect: Address{},
+ },
+ {
+ input: "[::]",
+ expect: Address{
+ Host: "::",
+ },
+ },
+ {
+ input: "127.0.0.1",
+ expect: Address{
+ Host: "127.0.0.1",
+ },
+ },
+ {
+ input: "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234",
+ expect: Address{
+ Host: "2001:db8:85a3:8d3:1319:8a2e:370:7348",
+ Port: "1234",
+ },
+ },
+ {
+ // IPv4 address in IPv6 form (#4381)
+ input: "[::ffff:cff4:e77d]:1234",
+ expect: Address{
+ Host: "::ffff:cff4:e77d",
+ Port: "1234",
+ },
+ },
+ {
+ input: "::ffff:cff4:e77d",
+ expect: Address{
+ Host: "::ffff:cff4:e77d",
+ },
+ },
+ }
+ for i, tc := range testCases {
+ addr, err := ParseAddress(tc.input)
+ if err != nil {
+ t.Errorf("Test %d: Parsing address '%s': %v", i, tc.input, err)
+ continue
+ }
+ actual := addr.Normalize()
+ if actual.Scheme != tc.expect.Scheme {
+ t.Errorf("Test %d: Input '%s': Expected Scheme='%s' but got Scheme='%s'", i, tc.input, tc.expect.Scheme, actual.Scheme)
+ }
+ if actual.Host != tc.expect.Host {
+ t.Errorf("Test %d: Input '%s': Expected Host='%s' but got Host='%s'", i, tc.input, tc.expect.Host, actual.Host)
+ }
+ if actual.Port != tc.expect.Port {
+ t.Errorf("Test %d: Input '%s': Expected Port='%s' but got Port='%s'", i, tc.input, tc.expect.Port, actual.Port)
+ }
+ if actual.Path != tc.expect.Path {
+ t.Errorf("Test %d: Input '%s': Expected Path='%s' but got Path='%s'", i, tc.input, tc.expect.Path, actual.Path)
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/builtins.go b/caddyconfig/httpcaddyfile/builtins.go
new file mode 100644
index 00000000000..45570d01685
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/builtins.go
@@ -0,0 +1,1171 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "fmt"
+ "html"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v3/acme"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+// init registers every Caddyfile directive implemented in this file
+// so the HTTP Caddyfile adapter can dispatch tokens to the matching
+// parse function.
+func init() {
+ RegisterDirective("bind", parseBind)
+ RegisterDirective("tls", parseTLS)
+ RegisterHandlerDirective("fs", parseFilesystem)
+ RegisterDirective("root", parseRoot)
+ RegisterHandlerDirective("vars", parseVars)
+ RegisterHandlerDirective("redir", parseRedir)
+ RegisterHandlerDirective("respond", parseRespond)
+ RegisterHandlerDirective("abort", parseAbort)
+ RegisterHandlerDirective("error", parseError)
+ RegisterHandlerDirective("route", parseRoute)
+ RegisterHandlerDirective("handle", parseHandle)
+ RegisterDirective("handle_errors", parseHandleErrors)
+ RegisterHandlerDirective("invoke", parseInvoke)
+ RegisterDirective("log", parseLog)
+ // "skip_log" is a deprecated alias for "log_skip"; parseLogSkip
+ // emits a deprecation warning when invoked under the old name.
+ RegisterHandlerDirective("skip_log", parseLogSkip)
+ RegisterHandlerDirective("log_skip", parseLogSkip)
+ RegisterHandlerDirective("log_name", parseLogName)
+}
+
+// parseBind parses the bind directive. Syntax:
+//
+// bind <addresses...> [{
+// protocols [h1|h2|h2c|h3] [...]
+// }]
+func parseBind(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+ var addresses, protocols []string
+ addresses = h.RemainingArgs()
+
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "protocols":
+ protocols = h.RemainingArgs()
+ if len(protocols) == 0 {
+ return nil, h.Errf("protocols requires one or more arguments")
+ }
+ default:
+ return nil, h.Errf("unknown subdirective: %s", h.Val())
+ }
+ }
+
+ // pair the listener addresses with the protocols allowed on them;
+ // downstream config assembly consumes this as a single "bind" value
+ return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{
+ addresses: addresses,
+ protocols: protocols,
+ }}}, nil
+}
+
+// parseTLS parses the tls directive. Syntax:
+//
+// tls [<email>|internal|force_automate]|[<cert_file> <key_file>] {
+// protocols <min> [<max>]
+// ciphers <cipher_suites...>
+// curves <curves...>
+// client_auth {
+// mode [request|require|verify_if_given|require_and_verify]
+// trust_pool <module_name> [...]
+// trusted_leaf_cert <base64_der>
+// trusted_leaf_cert_file <filename>
+// }
+// alpn <values...>
+// load <paths...>
+// ca <acme_ca_endpoint>
+// ca_root <pem_file>
+// key_type [ed25519|p256|p384|rsa2048|rsa4096]
+// dns <provider_name> [...]
+// propagation_delay <duration>
+// propagation_timeout <duration>
+// resolvers <dns_servers...>
+// dns_ttl <duration>
+// dns_challenge_override_domain <domain>
+// on_demand
+// reuse_private_keys
+// force_automate
+// eab <key_id> <mac_key>
+// issuer <module_name> [...]
+// get_certificate <module_name> [...]
+// insecure_secrets_log <log_file>
+// }
+func parseTLS(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+
+ cp := new(caddytls.ConnectionPolicy)
+ var fileLoader caddytls.FileLoader
+ var folderLoader caddytls.FolderLoader
+ var certSelector caddytls.CustomCertSelectionPolicy
+ var acmeIssuer *caddytls.ACMEIssuer
+ var keyType string
+ var internalIssuer *caddytls.InternalIssuer
+ var issuers []certmagic.Issuer
+ var certManagers []certmagic.Manager
+ var onDemand bool
+ var reusePrivateKeys bool
+ var forceAutomate bool
+
+ firstLine := h.RemainingArgs()
+ switch len(firstLine) {
+ case 0:
+ case 1:
+ if firstLine[0] == "internal" {
+ internalIssuer = new(caddytls.InternalIssuer)
+ } else if firstLine[0] == "force_automate" {
+ forceAutomate = true
+ } else if !strings.Contains(firstLine[0], "@") {
+ return nil, h.Err("single argument must either be 'internal', 'force_automate', or an email address")
+ } else {
+ acmeIssuer = &caddytls.ACMEIssuer{
+ Email: firstLine[0],
+ }
+ }
+
+ case 2:
+ // file certificate loader
+ certFilename := firstLine[0]
+ keyFilename := firstLine[1]
+
+ // tag this certificate so if multiple certs match, specifically
+ // this one that the user has provided will be used, see #2588:
+ // https://github.com/caddyserver/caddy/issues/2588 ... but we
+ // must be careful about how we do this; being careless will
+ // lead to failed handshakes
+ //
+ // we need to remember which cert files we've seen, since we
+ // must load each cert only once; otherwise, they each get a
+ // different tag... since a cert loaded twice has the same
+ // bytes, it will overwrite the first one in the cache, and
+ // only the last cert (and its tag) will survive, so any conn
+ // policy that is looking for any tag other than the last one
+ // to be loaded won't find it, and TLS handshakes will fail
+ // (see end of issue #3004)
+ //
+ // tlsCertTags maps certificate filenames to their tag.
+ // This is used to remember which tag is used for each
+ // certificate files, since we need to avoid loading
+ // the same certificate files more than once, overwriting
+ // previous tags
+ tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string)
+ if !ok {
+ tlsCertTags = make(map[string]string)
+ h.State["tlsCertTags"] = tlsCertTags
+ }
+
+ tag, ok := tlsCertTags[certFilename]
+ if !ok {
+ // haven't seen this cert file yet, let's give it a tag
+ // and add a loader for it
+ tag = fmt.Sprintf("cert%d", len(tlsCertTags))
+ fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
+ Certificate: certFilename,
+ Key: keyFilename,
+ Tags: []string{tag},
+ })
+ // remember this for next time we see this cert file
+ tlsCertTags[certFilename] = tag
+ }
+ certSelector.AnyTag = append(certSelector.AnyTag, tag)
+
+ default:
+ return nil, h.ArgErr()
+ }
+
+ var hasBlock bool
+ for h.NextBlock(0) {
+ hasBlock = true
+
+ switch h.Val() {
+ case "protocols":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.Errf("protocols requires one or two arguments")
+ }
+ if len(args) > 0 {
+ if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
+ return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[0])
+ }
+ cp.ProtocolMin = args[0]
+ }
+ if len(args) > 1 {
+ if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
+ return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[1])
+ }
+ cp.ProtocolMax = args[1]
+ }
+
+ case "ciphers":
+ for h.NextArg() {
+ if !caddytls.CipherSuiteNameSupported(h.Val()) {
+ return nil, h.Errf("wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
+ }
+ cp.CipherSuites = append(cp.CipherSuites, h.Val())
+ }
+
+ case "curves":
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
+ return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
+ }
+ cp.Curves = append(cp.Curves, h.Val())
+ }
+
+ case "client_auth":
+ cp.ClientAuthentication = &caddytls.ClientAuthentication{}
+ if err := cp.ClientAuthentication.UnmarshalCaddyfile(h.NewFromNextSegment()); err != nil {
+ return nil, err
+ }
+ case "alpn":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ cp.ALPN = args
+
+ case "load":
+ folderLoader = append(folderLoader, h.RemainingArgs()...)
+
+ case "ca":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.CA = arg[0]
+
+ case "key_type":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ keyType = arg[0]
+
+ case "eab":
+ arg := h.RemainingArgs()
+ if len(arg) != 2 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.ExternalAccount = &acme.EAB{
+ KeyID: arg[0],
+ MACKey: arg[1],
+ }
+
+ case "issuer":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ modName := h.Val()
+ modID := "tls.issuance." + modName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ issuer, ok := unm.(certmagic.Issuer)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
+ }
+ issuers = append(issuers, issuer)
+
+ case "get_certificate":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ modName := h.Val()
+ modID := "tls.get_certificate." + modName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ certManager, ok := unm.(certmagic.Manager)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a certmagic.CertificateManager", modID, unm)
+ }
+ certManagers = append(certManagers, certManager)
+
+ case "dns":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ provName := h.Val()
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ modID := "dns.providers." + provName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, h.warnings)
+
+ case "resolvers":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.Resolvers = args
+
+ case "propagation_delay":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ delayStr := arg[0]
+ delay, err := caddy.ParseDuration(delayStr)
+ if err != nil {
+ return nil, h.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.PropagationDelay = caddy.Duration(delay)
+
+ case "propagation_timeout":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ timeoutStr := arg[0]
+ var timeout time.Duration
+ if timeoutStr == "-1" {
+ timeout = time.Duration(-1)
+ } else {
+ var err error
+ timeout, err = caddy.ParseDuration(timeoutStr)
+ if err != nil {
+ return nil, h.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
+ }
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
+
+ case "dns_ttl":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ ttlStr := arg[0]
+ ttl, err := caddy.ParseDuration(ttlStr)
+ if err != nil {
+ return nil, h.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.TTL = caddy.Duration(ttl)
+
+ case "dns_challenge_override_domain":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.DNS == nil {
+ acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
+ }
+ acmeIssuer.Challenges.DNS.OverrideDomain = arg[0]
+
+ case "ca_root":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ if acmeIssuer == nil {
+ acmeIssuer = new(caddytls.ACMEIssuer)
+ }
+ acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0])
+
+ case "on_demand":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ onDemand = true
+
+ case "reuse_private_keys":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ reusePrivateKeys = true
+
+ case "insecure_secrets_log":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ cp.InsecureSecretsLog = h.Val()
+
+ default:
+ return nil, h.Errf("unknown subdirective: %s", h.Val())
+ }
+ }
+
+ // a naked tls directive is not allowed
+ if len(firstLine) == 0 && !hasBlock {
+ return nil, h.ArgErr()
+ }
+
+ // begin building the final config values
+ configVals := []ConfigValue{}
+
+ // certificate loaders
+ if len(fileLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_loader",
+ Value: fileLoader,
+ })
+ }
+ if len(folderLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_loader",
+ Value: folderLoader,
+ })
+ }
+
+ // some tls subdirectives are shortcuts that implicitly configure issuers, and the
+ // user can also configure issuers explicitly using the issuer subdirective; the
+ // logic to support both would likely be complex, or at least unintuitive
+ if len(issuers) > 0 && (acmeIssuer != nil || internalIssuer != nil) {
+ return nil, h.Err("cannot mix issuer subdirective (explicit issuers) with other issuer-specific subdirectives (implicit issuers)")
+ }
+ if acmeIssuer != nil && internalIssuer != nil {
+ return nil, h.Err("cannot create both ACME and internal certificate issuers")
+ }
+
+ // now we should either have: explicitly-created issuers, or an implicitly-created
+ // ACME or internal issuer, or no issuers at all
+ switch {
+ case len(issuers) > 0:
+ for _, issuer := range issuers {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_issuer",
+ Value: issuer,
+ })
+ }
+
+ case acmeIssuer != nil:
+ // implicit ACME issuers (from various subdirectives) - use defaults; there might be more than one
+ defaultIssuers := caddytls.DefaultIssuers(acmeIssuer.Email)
+
+ // if an ACME CA endpoint was set, the user expects to use that specific one,
+ // not any others that may be defaults, so replace all defaults with that ACME CA
+ if acmeIssuer.CA != "" {
+ defaultIssuers = []certmagic.Issuer{acmeIssuer}
+ }
+
+ for _, issuer := range defaultIssuers {
+ // apply settings from the implicitly-configured ACMEIssuer to any
+ // default ACMEIssuers, but preserve each default issuer's CA endpoint,
+ // because, for example, if you configure the DNS challenge, it should
+ // apply to any of the default ACMEIssuers, but you don't want to trample
+ // out their unique CA endpoints
+ if iss, ok := issuer.(*caddytls.ACMEIssuer); ok && iss != nil {
+ acmeCopy := *acmeIssuer
+ acmeCopy.CA = iss.CA
+ issuer = &acmeCopy
+ }
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_issuer",
+ Value: issuer,
+ })
+ }
+
+ case internalIssuer != nil:
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_issuer",
+ Value: internalIssuer,
+ })
+ }
+
+ // certificate key type
+ if keyType != "" {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.key_type",
+ Value: keyType,
+ })
+ }
+
+ // on-demand TLS
+ if onDemand {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.on_demand",
+ Value: true,
+ })
+ }
+ for _, certManager := range certManagers {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.cert_manager",
+ Value: certManager,
+ })
+ }
+
+ // reuse private keys TLS
+ if reusePrivateKeys {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.reuse_private_keys",
+ Value: true,
+ })
+ }
+
+ // if enabled, the names in the site addresses will be
+ // added to the automation policies
+ if forceAutomate {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.force_automate",
+ Value: true,
+ })
+ }
+
+ // custom certificate selection
+ if len(certSelector.AnyTag) > 0 {
+ cp.CertSelection = &certSelector
+ }
+
+ // connection policy -- always add one, to ensure that TLS
+ // is enabled, because this directive was used (this is
+ // needed, for instance, when a site block has a key of
+ // just ":5000" - i.e. no hostname, and only on-demand TLS
+ // is enabled)
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.connection_policy",
+ Value: cp,
+ })
+
+ return configVals, nil
+}
+
+// parseRoot parses the root directive. Syntax:
+//
+// root [<matcher>] <path>
+//
+// The path is stored in the "root" variable of the request context
+// via the vars middleware, optionally guarded by a request matcher.
+func parseRoot(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+
+ // count the tokens to determine what to do
+ argsCount := h.CountRemainingArgs()
+ if argsCount == 0 {
+ return nil, h.Errf("too few arguments; must have at least a root path")
+ }
+ if argsCount > 2 {
+ return nil, h.Errf("too many arguments; should only be a matcher and a path")
+ }
+
+ // with only one arg, assume it's a root path with no matcher token
+ if argsCount == 1 {
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return h.NewRoute(nil, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
+ }
+
+ // parse the matcher token into a matcher set
+ userMatcherSet, err := h.ExtractMatcherSet()
+ if err != nil {
+ return nil, err
+ }
+ h.Next() // consume directive name again, matcher parsing does a reset
+
+ // advance to the root path
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ // make the route with the matcher
+ return h.NewRoute(userMatcherSet, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
+}
+
+// parseFilesystem parses the fs directive. Syntax:
+//
+// fs <filesystem>
+//
+// Exactly one argument is required: the name of the filesystem to use,
+// which is stored in the "fs" variable of the request context.
+func parseFilesystem(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return caddyhttp.VarsMiddleware{"fs": h.Val()}, nil
+}
+
+// parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax.
+// The directive's tokens are consumed entirely by VarsMiddleware itself.
+func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ v := new(caddyhttp.VarsMiddleware)
+ err := v.UnmarshalCaddyfile(h.Dispenser)
+ return v, err
+}
+
+// parseRedir parses the redir directive. Syntax:
+//
+// redir [<matcher>] <to> [<code>]
+//
+// <code> can be "permanent" for 301, "temporary" for 302 (default),
+// a placeholder, or any number in the 3xx range or 401. The special
+// code "html" can be used to redirect only browser clients (will
+// respond with HTTP 200 and no Location header; redirect is performed
+// with JS and a meta tag).
+func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ to := h.Val()
+
+ var code string
+ if h.NextArg() {
+ code = h.Val()
+ }
+
+ var body string
+ var hdr http.Header
+ switch code {
+ case "permanent":
+ code = "301"
+
+ case "temporary", "":
+ code = "302"
+
+ case "html":
+ // Script tag comes first since that will better imitate a redirect in the browser's
+ // history, but the meta tag is a fallback for most non-JS clients.
+ // NOTE: the template must contain exactly four %s verbs, matching
+ // the four safeTo arguments passed to fmt.Sprintf below.
+ const metaRedir = `<!DOCTYPE html>
+<html>
+ <head>
+ <title>Redirecting...</title>
+ <script>window.location.replace("%s");</script>
+ <meta http-equiv="refresh" content="0; URL='%s'">
+ </head>
+ <body>
+ Redirecting to <a href="%s">%s</a>...
+ </body>
+</html>
+`
+ safeTo := html.EscapeString(to)
+ body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
+ hdr = http.Header{"Content-Type": []string{"text/html; charset=utf-8"}}
+ code = "200" // don't redirect non-browser clients
+
+ default:
+ // Allow placeholders for the code
+ if strings.HasPrefix(code, "{") {
+ break
+ }
+ // Try to validate as an integer otherwise
+ codeInt, err := strconv.Atoi(code)
+ if err != nil {
+ return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code)
+ }
+ // Sometimes, a 401 with Location header is desirable because
+ // requests made with XHR will "eat" the 3xx redirect; so if
+ // the intent was to redirect to an auth page, a 3xx won't
+ // work. Responding with 401 allows JS code to read the
+ // Location header and do a window.location redirect manually.
+ // see https://stackoverflow.com/a/2573589/846934
+ // see https://github.com/oauth2-proxy/oauth2-proxy/issues/1522
+ if codeInt < 300 || (codeInt > 399 && codeInt != 401) {
+ return nil, h.Errf("Redir code not in the 3xx range or 401: '%v'", codeInt)
+ }
+ }
+
+ // don't redirect non-browser clients
+ if code != "200" {
+ hdr = http.Header{"Location": []string{to}}
+ }
+
+ return caddyhttp.StaticResponse{
+ StatusCode: caddyhttp.WeakString(code),
+ Headers: hdr,
+ Body: body,
+ }, nil
+}
+
+// parseRespond parses the respond directive. The full syntax is
+// handled by StaticResponse.UnmarshalCaddyfile.
+func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ sr := new(caddyhttp.StaticResponse)
+ err := sr.UnmarshalCaddyfile(h.Dispenser)
+ return sr, err
+}
+
+// parseAbort parses the abort directive. It accepts no arguments and
+// no block; any extra token is an error. The returned handler aborts
+// the request via StaticResponse's Abort flag.
+func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive
+ for h.Next() || h.NextBlock(0) {
+ return nil, h.ArgErr()
+ }
+ return &caddyhttp.StaticResponse{Abort: true}, nil
+}
+
+// parseError parses the error directive. The full syntax is handled
+// by StaticError.UnmarshalCaddyfile.
+func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ se := new(caddyhttp.StaticError)
+ err := se.UnmarshalCaddyfile(h.Dispenser)
+ return se, err
+}
+
+// parseRoute parses the route directive. Only handler directives
+// (those producing an HTTP route or subroute) may appear inside a
+// route block; the results are combined into a single subroute.
+func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ allResults, err := parseSegmentAsConfig(h)
+ if err != nil {
+ return nil, err
+ }
+
+ // reject any directive whose result is not a route/subroute, since
+ // only those can be nested within a route
+ for _, result := range allResults {
+ switch result.Value.(type) {
+ case caddyhttp.Route, caddyhttp.Subroute:
+ default:
+ return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value)
+ }
+ }
+
+ return buildSubroute(allResults, h.groupCounter, false)
+}
+
+// parseHandle parses the handle directive by treating its entire
+// segment (matcher, arguments, and block) as a subroute.
+func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ return ParseSegmentAsSubroute(h)
+}
+
+// parseHandleErrors parses the handle_errors directive. Syntax:
+//
+// handle_errors [<status_codes...>] {
+// <directives...>
+// }
+//
+// Status codes may be exact values (e.g. 404) or a class like 4xx;
+// they are compiled into a CEL expression matcher over
+// {http.error.status_code} that is applied to every route in the block.
+func parseHandleErrors(h Helper) ([]ConfigValue, error) {
+ h.Next() // consume directive name
+
+ expression := ""
+ args := h.RemainingArgs()
+ if len(args) > 0 {
+ codes := []string{}
+ for _, val := range args {
+ // statuses are always exactly three characters ("404" or "4xx")
+ if len(val) != 3 {
+ return nil, h.Errf("bad status value '%s'", val)
+ }
+ // a "Nxx" class becomes a range check on the status code
+ if strings.HasSuffix(val, "xx") {
+ val = val[:1]
+ _, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, h.Errf("bad status value '%s': %v", val, err)
+ }
+ if expression != "" {
+ expression += " || "
+ }
+ expression += fmt.Sprintf("{http.error.status_code} >= %s00 && {http.error.status_code} <= %s99", val, val)
+ continue
+ }
+ // exact codes are collected and matched with a set-membership test
+ _, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, h.Errf("bad status value '%s': %v", val, err)
+ }
+ codes = append(codes, val)
+ }
+ if len(codes) > 0 {
+ if expression != "" {
+ expression += " || "
+ }
+ expression += "{http.error.status_code} in [" + strings.Join(codes, ", ") + "]"
+ }
+ // Reset cursor position to get ready for ParseSegmentAsSubroute
+ h.Reset()
+ h.Next()
+ h.RemainingArgs()
+ h.Prev()
+ } else {
+ // If no arguments present reset the cursor position to get ready for ParseSegmentAsSubroute
+ h.Prev()
+ }
+
+ handler, err := ParseSegmentAsSubroute(h)
+ if err != nil {
+ return nil, err
+ }
+ subroute, ok := handler.(*caddyhttp.Subroute)
+ if !ok {
+ return nil, h.Errf("segment was not parsed as a subroute")
+ }
+
+ // when status codes were given, gate every route in the block on the
+ // compiled status-code expression
+ if expression != "" {
+ statusMatcher := caddy.ModuleMap{
+ "expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
+ }
+ for i := range subroute.Routes {
+ subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
+ }
+ }
+ return []ConfigValue{
+ {
+ Class: "error_route",
+ Value: subroute,
+ },
+ }, nil
+}
+
+// parseInvoke parses the invoke directive. Syntax:
+//
+// invoke <named_route>
+//
+// Exactly one argument is required and no block is accepted.
+func parseInvoke(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ for h.Next() || h.NextBlock(0) {
+ return nil, h.ArgErr()
+ }
+
+ // remember that we're invoking this name
+ // to populate the server with these named routes
+ if h.State[namedRouteKey] == nil {
+ h.State[namedRouteKey] = map[string]struct{}{}
+ }
+ h.State[namedRouteKey].(map[string]struct{})[h.Val()] = struct{}{}
+
+ // return the handler
+ return &caddyhttp.Invoke{Name: h.Val()}, nil
+}
+
+// parseLog parses the log directive. Syntax:
+//
+// log [<logger_name>] {
+// hostnames <hostnames...>
+// output <writer_module> ...
+// core <core_module> ...
+// format <encoder_module> ...
+// level <level>
+// }
+func parseLog(h Helper) ([]ConfigValue, error) {
+ return parseLogHelper(h, nil)
+}
+
+// parseLogHelper is used both for the parseLog directive within Server Blocks,
+// as well as the global "log" option for configuring loggers at the global
+// level. The parseAsGlobalOption parameter is used to distinguish any differing logic
+// between the two.
+func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
+ h.Next() // consume option name
+
+ // When the globalLogNames parameter is passed in, we make
+ // modifications to the parsing behavior.
+ parseAsGlobalOption := globalLogNames != nil
+
+ var configValues []ConfigValue
+
+ // Logic below expects that a name is always present when a
+ // global option is being parsed; or an optional override
+ // is supported for access logs.
+ var logName string
+
+ if parseAsGlobalOption {
+ if h.NextArg() {
+ logName = h.Val()
+
+ // Only a single argument is supported.
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ } else {
+ // If there is no log name specified, we
+ // reference the default logger. See the
+ // setupNewDefault function in the logging
+ // package for where this is configured.
+ logName = caddy.DefaultLoggerName
+ }
+
+ // Verify this name is unused.
+ _, used := globalLogNames[logName]
+ if used {
+ return nil, h.Err("duplicate global log option for: " + logName)
+ }
+ globalLogNames[logName] = struct{}{}
+ } else {
+ // An optional override of the logger name can be provided;
+ // otherwise a default will be used, like "log0", "log1", etc.
+ if h.NextArg() {
+ logName = h.Val()
+
+ // Only a single argument is supported.
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ }
+ }
+
+ cl := new(caddy.CustomLog)
+
+ // allow overriding the current site block's hostnames for this logger;
+ // this is useful for setting up loggers per subdomain in a site block
+ // with a wildcard domain
+ customHostnames := []string{}
+ noHostname := false
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "hostnames":
+ if parseAsGlobalOption {
+ return nil, h.Err("hostnames is not allowed in the log global options")
+ }
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ customHostnames = append(customHostnames, args...)
+
+ case "output":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+
+ // can't use the usual caddyfile.Unmarshaler flow with the
+ // standard writers because they are in the caddy package
+ // (because they are the default) and implementing that
+ // interface there would unfortunately create circular import
+ var wo caddy.WriterOpener
+ switch moduleName {
+ case "stdout":
+ wo = caddy.StdoutWriter{}
+ case "stderr":
+ wo = caddy.StderrWriter{}
+ case "discard":
+ wo = caddy.DiscardWriter{}
+ default:
+ modID := "caddy.logging.writers." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
+ if err != nil {
+ return nil, err
+ }
+ var ok bool
+ wo, ok = unm.(caddy.WriterOpener)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm)
+ }
+ }
+ cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
+
+ case "sampling":
+ d := h.Dispenser.NewFromNextSegment()
+ for d.NextArg() {
+ // consume any tokens on the same line, if any.
+ }
+
+ sampling := &caddy.LogSampling{}
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ subdir := d.Val()
+ switch subdir {
+ case "interval":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ interval, err := time.ParseDuration(d.Val() + "ns")
+ if err != nil {
+ return nil, d.Errf("failed to parse interval: %v", err)
+ }
+ sampling.Interval = interval
+ case "first":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ first, err := strconv.Atoi(d.Val())
+ if err != nil {
+ return nil, d.Errf("failed to parse first: %v", err)
+ }
+ sampling.First = first
+ case "thereafter":
+ if !d.NextArg() {
+ return nil, d.ArgErr()
+ }
+ thereafter, err := strconv.Atoi(d.Val())
+ if err != nil {
+ return nil, d.Errf("failed to parse thereafter: %v", err)
+ }
+ sampling.Thereafter = thereafter
+ default:
+ return nil, d.Errf("unrecognized subdirective: %s", subdir)
+ }
+ }
+
+ cl.Sampling = sampling
+
+ case "core":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+ moduleID := "caddy.logging.cores." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
+ if err != nil {
+ return nil, err
+ }
+ core, ok := unm.(zapcore.Core)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a zapcore.Core", moduleID, unm)
+ }
+ cl.CoreRaw = caddyconfig.JSONModuleObject(core, "module", moduleName, h.warnings)
+
+ case "format":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ moduleName := h.Val()
+ moduleID := "caddy.logging.encoders." + moduleName
+ unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
+ if err != nil {
+ return nil, err
+ }
+ enc, ok := unm.(zapcore.Encoder)
+ if !ok {
+ return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
+ }
+ cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
+
+ case "level":
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ cl.Level = h.Val()
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+
+ case "include":
+ if !parseAsGlobalOption {
+ return nil, h.Err("include is not allowed in the log directive")
+ }
+ for h.NextArg() {
+ cl.Include = append(cl.Include, h.Val())
+ }
+
+ case "exclude":
+ if !parseAsGlobalOption {
+ return nil, h.Err("exclude is not allowed in the log directive")
+ }
+ for h.NextArg() {
+ cl.Exclude = append(cl.Exclude, h.Val())
+ }
+
+ case "no_hostname":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ noHostname = true
+
+ default:
+ return nil, h.Errf("unrecognized subdirective: %s", h.Val())
+ }
+ }
+
+ var val namedCustomLog
+ val.hostnames = customHostnames
+ val.noHostname = noHostname
+ isEmptyConfig := reflect.DeepEqual(cl, new(caddy.CustomLog))
+
+ // Skip handling of empty logging configs
+
+ if parseAsGlobalOption {
+ // Use indicated name for global log options
+ val.name = logName
+ } else {
+ if logName != "" {
+ val.name = logName
+ } else if !isEmptyConfig {
+ // Construct a log name for server log streams
+ logCounter, ok := h.State["logCounter"].(int)
+ if !ok {
+ logCounter = 0
+ }
+ val.name = fmt.Sprintf("log%d", logCounter)
+ logCounter++
+ h.State["logCounter"] = logCounter
+ }
+ if val.name != "" {
+ cl.Include = []string{"http.log.access." + val.name}
+ }
+ }
+ if !isEmptyConfig {
+ val.log = cl
+ }
+ configValues = append(configValues, ConfigValue{
+ Class: "custom_log",
+ Value: val,
+ })
+ return configValues, nil
+}
+
+// parseLogSkip parses the log_skip directive. Syntax:
+//
+// log_skip [<matcher>]
+//
+// It takes no arguments of its own and sets the "log_skip" variable
+// in the request context.
+func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+
+ // "skip_log" is deprecated, replaced by "log_skip"
+ if h.Val() == "skip_log" {
+ caddy.Log().Named("config.adapter.caddyfile").Warn("the 'skip_log' directive is deprecated, please use 'log_skip' instead!")
+ }
+
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ return caddyhttp.VarsMiddleware{"log_skip": true}, nil
+}
+
+// parseLogName parses the log_name directive. Syntax:
+//
+// log_name <names...>
+//
+// The remaining arguments become the access-logger name override for
+// requests matching this route.
+func parseLogName(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+ return caddyhttp.VarsMiddleware{
+ caddyhttp.AccessLoggerNameVarKey: h.RemainingArgs(),
+ }, nil
+}
diff --git a/caddyconfig/httpcaddyfile/builtins_test.go b/caddyconfig/httpcaddyfile/builtins_test.go
new file mode 100644
index 00000000000..c23531f22e6
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/builtins_test.go
@@ -0,0 +1,369 @@
+package httpcaddyfile
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ _ "github.com/caddyserver/caddy/v2/modules/logging"
+)
+
+// TestLogDirectiveSyntax adapts Caddyfiles that use the log
+// directive and compares the adapted JSON to golden output,
+// also verifying whether adaptation should error.
+func TestLogDirectiveSyntax(t *testing.T) {
+	for i, tc := range []struct {
+		input       string
+		output      string
+		expectError bool
+	}{
+		{
+			input: `:8080 {
+				log
+			}
+			`,
+			output:      `{"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{}}}}}}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				log {
+					core mock
+					output file foo.log
+				}
+			}
+			`,
+			output:      `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				log {
+					format filter {
+						wrap console
+						fields {
+							request>remote_ip ip_mask {
+								ipv4 24
+								ipv6 32
+							}
+						}
+					}
+				}
+			}
+			`,
+			output:      `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"encoder":{"fields":{"request\u003eremote_ip":{"filter":"ip_mask","ipv4_cidr":24,"ipv6_cidr":32}},"format":"filter","wrap":{"format":"console"}},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				log name-override {
+					core mock
+					output file foo.log
+				}
+			}
+			`,
+			output:      `{"logging":{"logs":{"default":{"exclude":["http.log.access.name-override"]},"name-override":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.name-override"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"name-override"}}}}}}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				log {
+					sampling {
+						interval 2
+						first 3
+						thereafter 4
+					}
+				}
+			}
+			`,
+			output:      `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"sampling":{"interval":2,"first":3,"thereafter":4},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`,
+			expectError: false,
+		},
+	} {
+
+		adapter := caddyfile.Adapter{
+			ServerType: ServerType{},
+		}
+
+		out, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+		// error presence must match the expectation exactly
+		if err != nil != tc.expectError {
+			t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+			continue
+		}
+
+		if string(out) != tc.output {
+			t.Errorf("Test %d error output mismatch Expected: %s, got %s", i, tc.output, out)
+		}
+	}
+}
+
+// TestRedirDirectiveSyntax adapts Caddyfiles that use the redir
+// directive and only checks whether adaptation errors, covering
+// valid status codes/keywords and invalid ones.
+func TestRedirDirectiveSyntax(t *testing.T) {
+	for i, tc := range []struct {
+		input       string
+		expectError bool
+	}{
+		{
+			input: `:8080 {
+				redir :8081
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir * :8081
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /api/* :8081 300
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir :8081 300
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /api/* :8081 399
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir :8081 399
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /old.html /new.html
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /old.html /new.html temporary
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir https://example.com{uri} permanent
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /old.html /new.html permanent
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /old.html /new.html html
+			}`,
+			expectError: false,
+		},
+		{
+			// this is now allowed so a Location header
+			// can be written and consumed by JS
+			// in the case of XHR requests
+			input: `:8080 {
+				redir * :8081 401
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 402
+			}`,
+			expectError: true,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 {http.reverse_proxy.status_code}
+			}`,
+			expectError: false,
+		},
+		{
+			input: `:8080 {
+				redir /old.html /new.html htlm
+			}`,
+			expectError: true,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 200
+			}`,
+			expectError: true,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 temp
+			}`,
+			expectError: true,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 perm
+			}`,
+			expectError: true,
+		},
+		{
+			input: `:8080 {
+				redir * :8081 php
+			}`,
+			expectError: true,
+		},
+	} {
+
+		adapter := caddyfile.Adapter{
+			ServerType: ServerType{},
+		}
+
+		_, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+		// error presence must match the expectation exactly
+		if err != nil != tc.expectError {
+			t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+			continue
+		}
+	}
+}
+
+// TestImportErrorLine verifies that errors raised from imported
+// snippets report the file/line of the offending import site.
+func TestImportErrorLine(t *testing.T) {
+	for i, tc := range []struct {
+		input     string
+		errorFunc func(err error) bool
+	}{
+		{
+			input: `(t1) {
+				abort {args[:]}
+			}
+			:8080 {
+				import t1
+				import t1 true
+			}`,
+			errorFunc: func(err error) bool {
+				return err != nil && strings.Contains(err.Error(), "Caddyfile:6 (import t1)")
+			},
+		},
+		{
+			input: `(t1) {
+				abort {args[:]}
+			}
+			:8080 {
+				import t1 true
+			}`,
+			errorFunc: func(err error) bool {
+				return err != nil && strings.Contains(err.Error(), "Caddyfile:5 (import t1)")
+			},
+		},
+		{
+			input: `
+			import testdata/import_variadic_snippet.txt
+			:8080 {
+				import t1 true
+			}`,
+			errorFunc: func(err error) bool {
+				return err == nil
+			},
+		},
+		{
+			input: `
+			import testdata/import_variadic_with_import.txt
+			:8080 {
+				import t1 true
+				import t2 true
+			}`,
+			errorFunc: func(err error) bool {
+				return err == nil
+			},
+		},
+	} {
+		adapter := caddyfile.Adapter{
+			ServerType: ServerType{},
+		}
+
+		_, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+		// each case supplies its own predicate on the error
+		if !tc.errorFunc(err) {
+			t.Errorf("Test %d error expectation failed, got %s", i, err)
+			continue
+		}
+	}
+}
+
+// TestNestedImport verifies that snippets importing other
+// snippets pass their arguments (indexed and variadic
+// placeholders) through correctly without erroring.
+func TestNestedImport(t *testing.T) {
+	for i, tc := range []struct {
+		input     string
+		errorFunc func(err error) bool
+	}{
+		{
+			input: `(t1) {
+				respond {args[0]} {args[1]}
+			}
+
+			(t2) {
+				import t1 {args[0]} 202
+			}
+
+			:8080 {
+				handle {
+					import t2 "foobar"
+				}
+			}`,
+			errorFunc: func(err error) bool {
+				return err == nil
+			},
+		},
+		{
+			input: `(t1) {
+				respond {args[:]}
+			}
+
+			(t2) {
+				import t1 {args[0]} {args[1]}
+			}
+
+			:8080 {
+				handle {
+					import t2 "foobar" 202
+				}
+			}`,
+			errorFunc: func(err error) bool {
+				return err == nil
+			},
+		},
+		{
+			input: `(t1) {
+				respond {args[0]} {args[1]}
+			}
+
+			(t2) {
+				import t1 {args[:]}
+			}
+
+			:8080 {
+				handle {
+					import t2 "foobar" 202
+				}
+			}`,
+			errorFunc: func(err error) bool {
+				return err == nil
+			},
+		},
+	} {
+		adapter := caddyfile.Adapter{
+			ServerType: ServerType{},
+		}
+
+		_, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+		// each case supplies its own predicate on the error
+		if !tc.errorFunc(err) {
+			t.Errorf("Test %d error expectation failed, got %s", i, err)
+			continue
+		}
+	}
+}
diff --git a/caddyconfig/httpcaddyfile/directives.go b/caddyconfig/httpcaddyfile/directives.go
new file mode 100644
index 00000000000..f0687a7e937
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/directives.go
@@ -0,0 +1,640 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "net"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// defaultDirectiveOrder specifies the default order
+// to apply directives in HTTP routes. This must only
+// consist of directives that are included in Caddy's
+// standard distribution.
+//
+// e.g. The 'root' directive goes near the start in
+// case rewrites or redirects depend on existence of
+// files, i.e. the file matcher, which must know the
+// root first.
+//
+// e.g. The 'header' directive goes before 'redir' so
+// that headers can be manipulated before doing redirects.
+//
+// e.g. The 'respond' directive is near the end because it
+// writes a response and terminates the middleware chain.
+var defaultDirectiveOrder = []string{
+	"tracing",
+
+	// set variables that may be used by other directives
+	"map",
+	"vars",
+	"fs",
+	"root",
+	"log_append",
+	"skip_log", // TODO: deprecated, renamed to log_skip
+	"log_skip",
+	"log_name",
+
+	// response headers and request body limits apply early
+	"header",
+	"copy_response_headers", // only in reverse_proxy's handle_response
+	"request_body",
+
+	"redir",
+
+	// incoming request manipulation
+	"method",
+	"rewrite",
+	"uri",
+	"try_files",
+
+	// middleware handlers; some wrap responses
+	"basicauth", // TODO: deprecated, renamed to basic_auth
+	"basic_auth",
+	"forward_auth",
+	"request_header",
+	"encode",
+	"push",
+	"intercept",
+	"templates",
+
+	// special routing & dispatching directives
+	"invoke",
+	"handle",
+	"handle_path",
+	"route",
+
+	// handlers that typically respond to requests
+	"abort",
+	"error",
+	"copy_response", // only in reverse_proxy's handle_response
+	"respond",
+	"metrics",
+	"reverse_proxy",
+	"php_fastcgi",
+	"file_server",
+	"acme_server",
+}
+
+// directiveOrder specifies the order to apply directives
+// in HTTP routes, after being modified by either the
+// plugins (via RegisterDirectiveOrder) or by the user
+// via the "order" global option.
+var directiveOrder = defaultDirectiveOrder
+
+// RegisterDirective registers a unique directive dir with an
+// associated unmarshaling (setup) function. When directive dir
+// is encountered in a Caddyfile, setupFunc will be called to
+// unmarshal its tokens.
+//
+// It panics on duplicate registration, surfacing the programmer
+// error at plugin init time rather than at config-parse time.
+func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
+	if _, ok := registeredDirectives[dir]; ok {
+		panic("directive " + dir + " already registered")
+	}
+	registeredDirectives[dir] = setupFunc
+}
+
+// RegisterHandlerDirective is like RegisterDirective, but for
+// directives which specifically output only an HTTP handler.
+// Directives registered with this function will always have
+// an optional matcher token as the first argument.
+func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
+	RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
+		// consume the directive name token
+		if !h.Next() {
+			return nil, h.ArgErr()
+		}
+
+		// pull out the optional matcher token (if present) and
+		// reset the dispenser so setupFunc never sees it
+		matcherSet, err := h.ExtractMatcherSet()
+		if err != nil {
+			return nil, err
+		}
+
+		val, err := setupFunc(h)
+		if err != nil {
+			return nil, err
+		}
+
+		// wrap the handler in a route guarded by the matcher
+		return h.NewRoute(matcherSet, val), nil
+	})
+}
+
+// RegisterDirectiveOrder registers the default order for a
+// directive from a plugin.
+//
+// This is useful when a plugin has a well-understood place
+// it should run in the middleware pipeline, and it allows
+// users to avoid having to define the order themselves.
+//
+// The directive dir may be placed in the position relative
+// to ('before' or 'after') a directive included in Caddy's
+// standard distribution. It cannot be relative to another
+// plugin's directive.
+//
+// EXPERIMENTAL: This API may change or be removed.
+func RegisterDirectiveOrder(dir string, position Positional, standardDir string) {
+	// check if directive was already ordered
+	if slices.Contains(directiveOrder, dir) {
+		panic("directive '" + dir + "' already ordered")
+	}
+
+	// note: only Before and After are accepted here; the
+	// First and Last positionals are not valid for this API
+	if position != Before && position != After {
+		panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'")
+	}
+
+	// check if directive exists in standard distribution, since
+	// we can't allow plugins to depend on one another; we can't
+	// guarantee the order that plugins are loaded in.
+	foundStandardDir := slices.Contains(defaultDirectiveOrder, standardDir)
+	if !foundStandardDir {
+		panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy")
+	}
+
+	// insert directive into proper position; the inner append
+	// copies the tail into a fresh slice before the outer append
+	// writes, so no existing elements are clobbered
+	newOrder := directiveOrder
+	for i, d := range newOrder {
+		if d != standardDir {
+			continue
+		}
+		if position == Before {
+			newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
+		} else if position == After {
+			newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
+		}
+		break
+	}
+	directiveOrder = newOrder
+}
+
+// RegisterGlobalOption registers a unique global option opt with
+// an associated unmarshaling (setup) function. When the global
+// option opt is encountered in a Caddyfile, setupFunc will be
+// called to unmarshal its tokens.
+//
+// Like RegisterDirective, it panics on duplicate registration.
+func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
+	if _, ok := registeredGlobalOptions[opt]; ok {
+		panic("global option " + opt + " already registered")
+	}
+	registeredGlobalOptions[opt] = setupFunc
+}
+
+// Helper is a type which helps setup a value from
+// Caddyfile tokens.
+type Helper struct {
+	*caddyfile.Dispenser
+	// State stores intermediate variables during caddyfile adaptation.
+	State map[string]any
+	// options holds the parsed global options, keyed by name.
+	options map[string]any
+	// warnings collects non-fatal adaptation messages.
+	warnings *[]caddyconfig.Warning
+	// matcherDefs maps named matcher definitions in scope.
+	matcherDefs map[string]caddy.ModuleMap
+	// parentBlock is the server block containing this directive.
+	parentBlock caddyfile.ServerBlock
+	// groupCounter issues unique route group names.
+	groupCounter counter
+}
+
+// Option gets the option keyed by name. It returns nil
+// if no such global option was set.
+func (h Helper) Option(name string) any {
+	return h.options[name]
+}
+
+// Caddyfiles returns the list of config files from
+// which tokens in the current server block were loaded.
+// The result is sorted and free of duplicates.
+func (h Helper) Caddyfiles() []string {
+	// first obtain set of names of files involved
+	// in this server block, without duplicates
+	files := make(map[string]struct{})
+	for _, segment := range h.parentBlock.Segments {
+		for _, token := range segment {
+			files[token.File] = struct{}{}
+		}
+	}
+	// then convert the set into a slice
+	filesSlice := make([]string, 0, len(files))
+	for file := range files {
+		filesSlice = append(filesSlice, file)
+	}
+	// map iteration order is random; sort for determinism
+	sort.Strings(filesSlice)
+	return filesSlice
+}
+
+// JSON converts val into JSON. Any errors are added to warnings
+// rather than returned, so the result may be nil on failure.
+func (h Helper) JSON(val any) json.RawMessage {
+	return caddyconfig.JSON(val, h.warnings)
+}
+
+// MatcherToken assumes the next argument token is (possibly) a matcher,
+// and if so, returns the matcher set along with a true value. If the next
+// token is not a matcher, nil and false is returned. Note that a true
+// value may be returned with a nil matcher set if it is a catch-all.
+func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
+	// no next arg means no matcher token at all
+	if !h.NextArg() {
+		return nil, false, nil
+	}
+	return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
+}
+
+// ExtractMatcherSet is like MatcherToken, except this is a higher-level
+// method that returns the matcher set described by the matcher token,
+// or nil if there is none, and deletes the matcher token from the
+// dispenser and resets it as if this look-ahead never happened. Useful
+// when wrapping a route (one or more handlers) in a user-defined matcher.
+func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
+	matcherSet, hasMatcher, err := h.MatcherToken()
+	if err != nil {
+		return nil, err
+	}
+	if hasMatcher {
+		// strip matcher token; we don't need to
+		// use the return value here because a
+		// new dispenser should have been made
+		// solely for this directive's tokens,
+		// with no other uses of same slice
+		h.Dispenser.Delete()
+	}
+	h.Dispenser.Reset() // pretend this lookahead never happened
+	return matcherSet, nil
+}
+
+// NewRoute returns config values relevant to creating a new HTTP route.
+// If the handler's module cannot be found, a warning is recorded and
+// nil is returned instead of an error.
+func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
+	handler caddyhttp.MiddlewareHandler,
+) []ConfigValue {
+	mod, err := caddy.GetModule(caddy.GetModuleID(handler))
+	if err != nil {
+		// degrade to a warning so adaptation can continue
+		*h.warnings = append(*h.warnings, caddyconfig.Warning{
+			File:    h.File(),
+			Line:    h.Line(),
+			Message: err.Error(),
+		})
+		return nil
+	}
+	var matcherSetsRaw []caddy.ModuleMap
+	if matcherSet != nil {
+		matcherSetsRaw = append(matcherSetsRaw, matcherSet)
+	}
+	return []ConfigValue{
+		{
+			Class: "route",
+			Value: caddyhttp.Route{
+				MatcherSetsRaw: matcherSetsRaw,
+				HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
+			},
+		},
+	}
+}
+
+// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
+// same group, if there is more than one route in vals.
+func (h Helper) GroupRoutes(vals []ConfigValue) {
+	// ensure there's at least two routes; group of one is pointless
+	var count int
+	for _, v := range vals {
+		if _, ok := v.Value.(caddyhttp.Route); ok {
+			count++
+			if count > 1 {
+				break
+			}
+		}
+	}
+	if count < 2 {
+		return
+	}
+
+	// now that we know the group will have some effect, do it
+	groupName := h.groupCounter.nextGroup()
+	for i := range vals {
+		// routes are stored by value, so write the modified
+		// copy back into the slice element
+		if route, ok := vals[i].Value.(caddyhttp.Route); ok {
+			route.Group = groupName
+			vals[i].Value = route
+		}
+	}
+}
+
+// WithDispenser returns a new instance based on d. All others Helper
+// fields are copied, so typically maps are shared with this new instance.
+func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
+	// h is a value receiver, so this mutation affects only the copy
+	h.Dispenser = d
+	return h
+}
+
+// ParseSegmentAsSubroute parses the segment such that its subdirectives
+// are themselves treated as directives, from which a subroute is built
+// and returned.
+func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
+	allResults, err := parseSegmentAsConfig(h)
+	if err != nil {
+		return nil, err
+	}
+
+	// assemble the parsed config values into a single subroute handler
+	return buildSubroute(allResults, h.groupCounter, true)
+}
+
+// parseSegmentAsConfig parses the segment such that its subdirectives
+// are themselves treated as directives, including named matcher definitions,
+// and the raw Config structs are returned.
+func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
+	var allResults []ConfigValue
+
+	for h.Next() {
+		// don't allow non-matcher args on the first line
+		if h.NextArg() {
+			return nil, h.ArgErr()
+		}
+
+		// slice the linear list of tokens into top-level segments
+		var segments []caddyfile.Segment
+		for nesting := h.Nesting(); h.NextBlock(nesting); {
+			segments = append(segments, h.NextSegment())
+		}
+
+		// copy existing matcher definitions so we can augment
+		// new ones that are defined only in this scope
+		matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
+		for key, val := range h.matcherDefs {
+			matcherDefs[key] = val
+		}
+
+		// find and extract any embedded matcher definitions in this scope
+		for i := 0; i < len(segments); i++ {
+			seg := segments[i]
+			if strings.HasPrefix(seg.Directive(), matcherPrefix) {
+				// parse, then add the matcher to matcherDefs
+				err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
+				if err != nil {
+					return nil, err
+				}
+				// remove the matcher segment (consumed), then step back the loop
+				segments = append(segments[:i], segments[i+1:]...)
+				i--
+			}
+		}
+
+		// with matchers ready to go, evaluate each directive's segment
+		for _, seg := range segments {
+			dir := seg.Directive()
+			dirFunc, ok := registeredDirectives[dir]
+			if !ok {
+				return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir)
+			}
+
+			// give each directive a fresh dispenser over only its
+			// own tokens, plus the scope's matcher definitions
+			subHelper := h
+			subHelper.Dispenser = caddyfile.NewDispenser(seg)
+			subHelper.matcherDefs = matcherDefs
+
+			results, err := dirFunc(subHelper)
+			if err != nil {
+				return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
+			}
+
+			dir = normalizeDirectiveName(dir)
+
+			// tag each result with its (normalized) directive name
+			// so sortRoutes can order them later
+			for _, result := range results {
+				result.directive = dir
+				allResults = append(allResults, result)
+			}
+		}
+	}
+
+	return allResults, nil
+}
+
+// ConfigValue represents a value to be added to the final
+// configuration, or a value to be consulted when building
+// the final configuration.
+type ConfigValue struct {
+	// The kind of value this is. As the config is
+	// being built, the adapter will look in the
+	// "pile" for values belonging to a certain
+	// class when it is setting up a certain part
+	// of the config. The associated value will be
+	// type-asserted and placed accordingly.
+	Class string
+
+	// The value to be used when building the config.
+	// Generally its type is associated with the
+	// name of the Class.
+	Value any
+
+	// directive is the (normalized) name of the directive
+	// that produced this value; used for route ordering.
+	directive string
+}
+
+// sortRoutes stably sorts routes by the configured directive
+// order, sub-sorting same-directive routes by path specificity.
+func sortRoutes(routes []ConfigValue) {
+	dirPositions := make(map[string]int)
+	for i, dir := range directiveOrder {
+		dirPositions[dir] = i
+	}
+
+	sort.SliceStable(routes, func(i, j int) bool {
+		// if the directives are different, just use the established directive order
+		iDir, jDir := routes[i].directive, routes[j].directive
+		if iDir != jDir {
+			return dirPositions[iDir] < dirPositions[jDir]
+		}
+
+		// directives are the same; sub-sort by path matcher length if there's
+		// only one matcher set and one path (this is a very common case and
+		// usually -- but not always -- helpful/expected, oh well; user can
+		// always take manual control of order using handler or route blocks)
+		iRoute, ok := routes[i].Value.(caddyhttp.Route)
+		if !ok {
+			return false
+		}
+		jRoute, ok := routes[j].Value.(caddyhttp.Route)
+		if !ok {
+			return false
+		}
+
+		// decode the path matchers if there is just one matcher set;
+		// unmarshal errors are deliberately ignored (treated as no path)
+		var iPM, jPM caddyhttp.MatchPath
+		if len(iRoute.MatcherSetsRaw) == 1 {
+			_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
+		}
+		if len(jRoute.MatcherSetsRaw) == 1 {
+			_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
+		}
+
+		// if there is only one path in the path matcher, sort by longer path
+		// (more specific) first; missing path matchers or multi-matchers are
+		// treated as zero-length paths
+		var iPathLen, jPathLen int
+		if len(iPM) == 1 {
+			iPathLen = len(iPM[0])
+		}
+		if len(jPM) == 1 {
+			jPathLen = len(jPM[0])
+		}
+
+		sortByPath := func() bool {
+			// we can only confidently compare path lengths if both
+			// directives have a single path to match (issue #5037)
+			if iPathLen > 0 && jPathLen > 0 {
+				// if both paths are the same except for a trailing wildcard,
+				// sort by the shorter path first (which is more specific)
+				if strings.TrimSuffix(iPM[0], "*") == strings.TrimSuffix(jPM[0], "*") {
+					return iPathLen < jPathLen
+				}
+
+				// sort most-specific (longest) path first
+				return iPathLen > jPathLen
+			}
+
+			// if both directives don't have a single path to compare,
+			// sort whichever one has a matcher first; if both have
+			// a matcher, sort equally (stable sort preserves order)
+			return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
+		}()
+
+		// some directives involve setting values which can overwrite
+		// each other, so it makes most sense to reverse the order so
+		// that the least-specific matcher is first, allowing the last
+		// matching one to win
+		if iDir == "vars" {
+			return !sortByPath
+		}
+
+		// everything else is most-specific matcher first
+		return sortByPath
+	})
+}
+
+// serverBlock pairs a Caddyfile server block with
+// a "pile" of config values, keyed by class name,
+// as well as its parsed keys for convenience.
+type serverBlock struct {
+	// block is the raw parsed server block.
+	block caddyfile.ServerBlock
+	pile  map[string][]ConfigValue // config values obtained from directives
+	// parsedKeys are the block's addresses (keys), parsed.
+	parsedKeys []Address
+}
+
+// hostsFromKeys returns a list of all the non-empty hostnames found in
+// the keys of the server block sb. If logger mode is false, a key with
+// an empty hostname portion will return an empty slice, since that
+// server block is interpreted to effectively match all hosts. An empty
+// string is never added to the slice.
+//
+// If loggerMode is true, then the non-standard ports of keys will be
+// joined to the hostnames. This is to effectively match the Host
+// header of requests that come in for that key.
+//
+// The resulting slice is not sorted but will never have duplicates.
+func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
+	// ensure each entry in our list is unique
+	hostMap := make(map[string]struct{})
+	for _, addr := range sb.parsedKeys {
+		if addr.Host == "" {
+			if !loggerMode {
+				// server block contains a key like ":443", i.e. the host portion
+				// is empty / catch-all, which means to match all hosts
+				return []string{}
+			}
+			// never append an empty string
+			continue
+		}
+		// in logger mode, keep non-standard ports attached to the host
+		if loggerMode &&
+			addr.Port != "" &&
+			addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) &&
+			addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) {
+			hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{}
+		} else {
+			hostMap[addr.Host] = struct{}{}
+		}
+	}
+
+	// convert map to slice
+	sblockHosts := make([]string, 0, len(hostMap))
+	for host := range hostMap {
+		sblockHosts = append(sblockHosts, host)
+	}
+
+	return sblockHosts
+}
+
+// hostsFromKeysNotHTTP returns the unique, non-empty hosts from
+// sb's keys, excluding keys that are explicitly the http scheme
+// or listen on the given httpPort. The result is unsorted.
+func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string {
+	// ensure each entry in our list is unique
+	hostMap := make(map[string]struct{})
+	for _, addr := range sb.parsedKeys {
+		if addr.Host == "" {
+			continue
+		}
+		if addr.Scheme != "http" && addr.Port != httpPort {
+			hostMap[addr.Host] = struct{}{}
+		}
+	}
+
+	// convert map to slice
+	sblockHosts := make([]string, 0, len(hostMap))
+	for host := range hostMap {
+		sblockHosts = append(sblockHosts, host)
+	}
+
+	return sblockHosts
+}
+
+// hasHostCatchAllKey returns true if sb has a key that
+// omits a host portion, i.e. it "catches all" hosts.
+func (sb serverBlock) hasHostCatchAllKey() bool {
+	// any key with an empty Host is a catch-all
+	return slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool {
+		return addr.Host == ""
+	})
+}
+
+// isAllHTTP returns true if all sb keys explicitly specify
+// the http:// scheme
+func (sb serverBlock) isAllHTTP() bool {
+	// true iff no key has a scheme other than "http"
+	return !slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool {
+		return addr.Scheme != "http"
+	})
+}
+
+// Positional are the supported modes for ordering directives.
+type Positional string
+
+const (
+	Before Positional = "before"
+	After  Positional = "after"
+	// First and Last are declared for completeness but are not
+	// accepted by RegisterDirectiveOrder, which only allows
+	// Before and After.
+	First Positional = "first"
+	Last  Positional = "last"
+)
+
+type (
+	// UnmarshalFunc is a function which can unmarshal Caddyfile
+	// tokens into zero or more config values using a Helper type.
+	// These are passed in a call to RegisterDirective.
+	UnmarshalFunc func(h Helper) ([]ConfigValue, error)
+
+	// UnmarshalHandlerFunc is like UnmarshalFunc, except the
+	// output of the unmarshaling is an HTTP handler. This
+	// function does not need to deal with HTTP request matching
+	// which is abstracted away. Since writing HTTP handlers
+	// with Caddyfile support is very common, this is a more
+	// convenient way to add a handler to the chain since a lot
+	// of the details common to HTTP handlers are taken care of
+	// for you. These are passed to a call to
+	// RegisterHandlerDirective.
+	UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
+
+	// UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
+	// tokens from a global option. It is passed the tokens to parse and
+	// existing value from the previous instance of this global option
+	// (if any). It returns the value to associate with this global option.
+	UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal any) (any, error)
+)
+
+// registeredDirectives maps directive names to their setup functions.
+var registeredDirectives = make(map[string]UnmarshalFunc)
+
+// registeredGlobalOptions maps global option names to their setup functions.
+var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
diff --git a/caddyconfig/httpcaddyfile/directives_test.go b/caddyconfig/httpcaddyfile/directives_test.go
new file mode 100644
index 00000000000..2b4d3e6c370
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/directives_test.go
@@ -0,0 +1,97 @@
+package httpcaddyfile
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+)
+
+// TestHostsFromKeys exercises serverBlock.hostsFromKeys in both
+// normal and logger modes against a variety of parsed addresses.
+func TestHostsFromKeys(t *testing.T) {
+	for i, tc := range []struct {
+		keys             []Address
+		expectNormalMode []string
+		expectLoggerMode []string
+	}{
+		{
+			[]Address{
+				{Original: "foo", Host: "foo"},
+			},
+			[]string{"foo"},
+			[]string{"foo"},
+		},
+		{
+			[]Address{
+				{Original: "foo", Host: "foo"},
+				{Original: "bar", Host: "bar"},
+			},
+			[]string{"bar", "foo"},
+			[]string{"bar", "foo"},
+		},
+		{
+			[]Address{
+				{Original: ":2015", Port: "2015"},
+			},
+			[]string{},
+			[]string{},
+		},
+		{
+			[]Address{
+				{Original: ":443", Port: "443"},
+			},
+			[]string{},
+			[]string{},
+		},
+		{
+			[]Address{
+				{Original: "foo", Host: "foo"},
+				{Original: ":2015", Port: "2015"},
+			},
+			[]string{},
+			[]string{"foo"},
+		},
+		{
+			[]Address{
+				{Original: "example.com:2015", Host: "example.com", Port: "2015"},
+			},
+			[]string{"example.com"},
+			[]string{"example.com:2015"},
+		},
+		{
+			[]Address{
+				{Original: "example.com:80", Host: "example.com", Port: "80"},
+			},
+			[]string{"example.com"},
+			[]string{"example.com"},
+		},
+		{
+			[]Address{
+				{Original: "https://:2015/foo", Scheme: "https", Port: "2015", Path: "/foo"},
+			},
+			[]string{},
+			[]string{},
+		},
+		{
+			[]Address{
+				{Original: "https://example.com:2015/foo", Scheme: "https", Host: "example.com", Port: "2015", Path: "/foo"},
+			},
+			[]string{"example.com"},
+			[]string{"example.com:2015"},
+		},
+	} {
+		sb := serverBlock{parsedKeys: tc.keys}
+
+		// test in normal mode; sort because hostsFromKeys
+		// returns hosts in nondeterministic (map) order
+		actual := sb.hostsFromKeys(false)
+		sort.Strings(actual)
+		if !reflect.DeepEqual(tc.expectNormalMode, actual) {
+			t.Errorf("Test %d (loggerMode=false): Expected: %v Actual: %v", i, tc.expectNormalMode, actual)
+		}
+
+		// test in logger mode
+		actual = sb.hostsFromKeys(true)
+		sort.Strings(actual)
+		if !reflect.DeepEqual(tc.expectLoggerMode, actual) {
+			t.Errorf("Test %d (loggerMode=true): Expected: %v Actual: %v", i, tc.expectLoggerMode, actual)
+		}
+	}
+}
diff --git a/caddyconfig/httpcaddyfile/httptype.go b/caddyconfig/httpcaddyfile/httptype.go
new file mode 100644
index 00000000000..37a6f6b23cd
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/httptype.go
@@ -0,0 +1,1748 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "cmp"
+ "encoding/json"
+ "fmt"
+ "net"
+ "reflect"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddypki"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+// init registers this package's ServerType as the "caddyfile" config
+// adapter, so `caddy adapt --adapter caddyfile` (and automatic Caddyfile
+// detection) routes through ServerType.Setup below.
+func init() {
+	caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
+}
+
+// App represents the configuration for a non-standard
+// Caddy app module (e.g. third-party plugin) which was
+// parsed from a global options block. Setup copies each
+// App found in the options map directly into the final
+// config's AppsRaw under Name.
+type App struct {
+	// The JSON key for the app being configured
+	Name string
+
+	// The raw app config as JSON
+	Value json.RawMessage
+}
+
+// ServerType can set up a config from an HTTP Caddyfile.
+// It is stateless; all work happens in its Setup method.
+type ServerType struct{}
+
+// Setup makes a config from the tokens. It proceeds in phases:
+// validate site keys, evaluate the global options block, extract
+// named routes, run every directive's registered parser to fill each
+// server block's "pile" of config values, map addresses to servers,
+// and finally assemble the http, tls, pki, and logging apps into a
+// *caddy.Config. Non-fatal issues are accumulated into warnings,
+// which are returned even alongside an error.
+func (st ServerType) Setup(
+	inputServerBlocks []caddyfile.ServerBlock,
+	options map[string]any,
+) (*caddy.Config, []caddyconfig.Warning, error) {
+	var warnings []caddyconfig.Warning
+	gc := counter{new(int)}
+	// state is shared across all directive invocations in this Setup
+	// pass (passed to each Helper below) so directives can coordinate,
+	// e.g. 'invoke' recording named-route usage under namedRouteKey.
+	state := make(map[string]any)
+
+	// load all the server blocks and associate them with a "pile" of config values
+	originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
+	for _, sblock := range inputServerBlocks {
+		for j, k := range sblock.Keys {
+			if j == 0 && strings.HasPrefix(k.Text, "@") {
+				return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
+			}
+			if _, ok := registeredDirectives[k.Text]; ok {
+				return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
+			}
+		}
+		originalServerBlocks = append(originalServerBlocks, serverBlock{
+			block: sblock,
+			pile:  make(map[string][]ConfigValue),
+		})
+	}
+
+	// apply any global options
+	var err error
+	originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	// this will replace both static and user-defined placeholder shorthands
+	// with actual identifiers used by Caddy
+	replacer := NewShorthandReplacer()
+
+	originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	for _, sb := range originalServerBlocks {
+		for i := range sb.block.Segments {
+			replacer.ApplyToSegment(&sb.block.Segments[i])
+		}
+
+		if len(sb.block.Keys) == 0 {
+			return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
+		}
+
+		// extract matcher definitions
+		matcherDefs := make(map[string]caddy.ModuleMap)
+		for _, segment := range sb.block.Segments {
+			if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
+				d := sb.block.DispenseDirective(dir)
+				err := parseMatcherDefinitions(d, matcherDefs)
+				if err != nil {
+					return nil, warnings, err
+				}
+			}
+		}
+
+		// evaluate each directive ("segment") in this block
+		for _, segment := range sb.block.Segments {
+			dir := segment.Directive()
+
+			if strings.HasPrefix(dir, matcherPrefix) {
+				// matcher definitions were pre-processed
+				continue
+			}
+
+			dirFunc, ok := registeredDirectives[dir]
+			if !ok {
+				tkn := segment[0]
+				message := "%s:%d: unrecognized directive: %s"
+				if !sb.block.HasBraces {
+					message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations."
+				}
+				return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir)
+			}
+
+			h := Helper{
+				Dispenser:    caddyfile.NewDispenser(segment),
+				options:      options,
+				warnings:     &warnings,
+				matcherDefs:  matcherDefs,
+				parentBlock:  sb.block,
+				groupCounter: gc,
+				State:        state,
+			}
+
+			results, err := dirFunc(h)
+			if err != nil {
+				return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
+			}
+
+			dir = normalizeDirectiveName(dir)
+
+			for _, result := range results {
+				result.directive = dir
+				sb.pile[result.Class] = append(sb.pile[result.Class], result)
+			}
+
+			// specially handle named routes that were pulled out from
+			// the invoke directive, which could be nested anywhere within
+			// some subroutes in this directive; we add them to the pile
+			// for this server block
+			if state[namedRouteKey] != nil {
+				for name := range state[namedRouteKey].(map[string]struct{}) {
+					result := ConfigValue{Class: namedRouteKey, Value: name}
+					sb.pile[result.Class] = append(sb.pile[result.Class], result)
+				}
+				state[namedRouteKey] = nil
+			}
+		}
+	}
+
+	// map
+	sbmap, err := st.mapAddressToProtocolToServerBlocks(originalServerBlocks, options)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	// reduce
+	pairings := st.consolidateAddrMappings(sbmap)
+
+	// each pairing of listener addresses to list of server
+	// blocks is basically a server definition
+	servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	// hoist the metrics config from per-server to global; any single
+	// server enabling per-host metrics enables it globally, and the
+	// per-server copy is cleared since only the hoisted value is used
+	metrics, _ := options["metrics"].(*caddyhttp.Metrics)
+	for _, s := range servers {
+		if s.Metrics != nil {
+			metrics = cmp.Or[*caddyhttp.Metrics](metrics, &caddyhttp.Metrics{})
+			metrics = &caddyhttp.Metrics{
+				PerHost: metrics.PerHost || s.Metrics.PerHost,
+			}
+			s.Metrics = nil // we don't need it anymore
+		}
+	}
+
+	// now that each server is configured, make the HTTP app
+	httpApp := caddyhttp.App{
+		HTTPPort:      tryInt(options["http_port"], &warnings),
+		HTTPSPort:     tryInt(options["https_port"], &warnings),
+		GracePeriod:   tryDuration(options["grace_period"], &warnings),
+		ShutdownDelay: tryDuration(options["shutdown_delay"], &warnings),
+		Metrics:       metrics,
+		Servers:       servers,
+	}
+
+	// then make the TLS app
+	tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	// then make the PKI app
+	pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings)
+	if err != nil {
+		return nil, warnings, err
+	}
+
+	// extract any custom logs, and enforce configured levels
+	var customLogs []namedCustomLog
+	var hasDefaultLog bool
+	// addCustomLog collects a named logger, tracking whether the default
+	// logger was explicitly customized, and forcing DEBUG level when the
+	// global 'debug' option is set and the logger has no explicit level.
+	addCustomLog := func(ncl namedCustomLog) {
+		if ncl.name == "" {
+			return
+		}
+		if ncl.name == caddy.DefaultLoggerName {
+			hasDefaultLog = true
+		}
+		if _, ok := options["debug"]; ok && ncl.log != nil && ncl.log.Level == "" {
+			ncl.log.Level = zap.DebugLevel.CapitalString()
+		}
+		customLogs = append(customLogs, ncl)
+	}
+
+	// Apply global log options, when set
+	if options["log"] != nil {
+		for _, logValue := range options["log"].([]ConfigValue) {
+			addCustomLog(logValue.Value.(namedCustomLog))
+		}
+	}
+
+	if !hasDefaultLog {
+		// if the default log was not customized, ensure we
+		// configure it with any applicable options
+		if _, ok := options["debug"]; ok {
+			customLogs = append(customLogs, namedCustomLog{
+				name: caddy.DefaultLoggerName,
+				log: &caddy.CustomLog{
+					BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
+				},
+			})
+		}
+	}
+
+	// Apply server-specific log options
+	for _, p := range pairings {
+		for _, sb := range p.serverBlocks {
+			for _, clVal := range sb.pile["custom_log"] {
+				addCustomLog(clVal.Value.(namedCustomLog))
+			}
+		}
+	}
+
+	// and finally the top-level config, then we're done!
+	cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
+
+	// loop through the configured options, and if any of
+	// them are an httpcaddyfile App, then we insert them
+	// into the config as raw Caddy apps
+	for _, opt := range options {
+		if app, ok := opt.(App); ok {
+			cfg.AppsRaw[app.Name] = app.Value
+		}
+	}
+
+	// insert the standard Caddy apps into the config; empty apps are
+	// omitted entirely (compared against their zero-ish values) to keep
+	// the emitted JSON minimal
+	if len(httpApp.Servers) > 0 {
+		cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
+	}
+	if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
+		cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
+	}
+	if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
+		cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
+	}
+	if filesystems, ok := options["filesystem"].(caddy.Module); ok {
+		cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(
+			filesystems,
+			&warnings)
+	}
+
+	if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
+		cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
+			"module",
+			storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
+			&warnings)
+	}
+	if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
+		cfg.Admin = adminConfig
+	}
+	if pc, ok := options["persist_config"].(string); ok && pc == "off" {
+		if cfg.Admin == nil {
+			cfg.Admin = new(caddy.AdminConfig)
+		}
+		if cfg.Admin.Config == nil {
+			cfg.Admin.Config = new(caddy.ConfigSettings)
+		}
+		cfg.Admin.Config.Persist = new(bool)
+	}
+
+	if len(customLogs) > 0 {
+		if cfg.Logging == nil {
+			cfg.Logging = &caddy.Logging{
+				Logs: make(map[string]*caddy.CustomLog),
+			}
+		}
+
+		// Add the default log first if defined, so that it doesn't
+		// accidentally get re-created below due to the Exclude logic
+		for _, ncl := range customLogs {
+			if ncl.name == caddy.DefaultLoggerName && ncl.log != nil {
+				cfg.Logging.Logs[caddy.DefaultLoggerName] = ncl.log
+				break
+			}
+		}
+
+		// Add the rest of the custom logs
+		for _, ncl := range customLogs {
+			if ncl.log == nil || ncl.name == caddy.DefaultLoggerName {
+				continue
+			}
+			if ncl.name != "" {
+				cfg.Logging.Logs[ncl.name] = ncl.log
+			}
+			// most users seem to prefer not writing access logs
+			// to the default log when they are directed to a
+			// file or have any other special customization
+			if ncl.name != caddy.DefaultLoggerName && len(ncl.log.Include) > 0 {
+				defaultLog, ok := cfg.Logging.Logs[caddy.DefaultLoggerName]
+				if !ok {
+					defaultLog = new(caddy.CustomLog)
+					cfg.Logging.Logs[caddy.DefaultLoggerName] = defaultLog
+				}
+				defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)
+
+				// avoid duplicates by sorting + compacting
+				sort.Strings(defaultLog.Exclude)
+				defaultLog.Exclude = slices.Compact[[]string, string](defaultLog.Exclude)
+			}
+		}
+		// we may have not actually added anything, so remove if empty
+		if len(cfg.Logging.Logs) == 0 {
+			cfg.Logging = nil
+		}
+	}
+
+	return cfg, warnings, nil
+}
+
+// evaluateGlobalOptionsBlock evaluates the global options block,
+// which is expected to be the first server block if it has zero
+// keys. It returns the updated list of server blocks with the
+// global options block removed, and updates options accordingly.
+// Most options simply overwrite any prior value, but "servers",
+// "log", and "default_bind" are folded into slices so they may be
+// specified multiple times; "servers" entries are additionally
+// sorted and checked for duplicate listener addresses.
+func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]any) ([]serverBlock, error) {
+	// no global options block: first block (if any) has keys, so it's a site
+	if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
+		return serverBlocks, nil
+	}
+
+	for _, segment := range serverBlocks[0].block.Segments {
+		opt := segment.Directive()
+		var val any
+		var err error
+		disp := caddyfile.NewDispenser(segment)
+
+		optFunc, ok := registeredGlobalOptions[opt]
+		if !ok {
+			tkn := segment[0]
+			return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt)
+		}
+
+		// the option's registered parser receives any previously-stored
+		// value for the same option so it can merge if it wants to
+		val, err = optFunc(disp, options[opt])
+		if err != nil {
+			return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err)
+		}
+
+		// As a special case, fold multiple "servers" options together
+		// in an array instead of overwriting a possible existing value
+		if opt == "servers" {
+			existingOpts, ok := options[opt].([]serverOptions)
+			if !ok {
+				existingOpts = []serverOptions{}
+			}
+			serverOpts, ok := val.(serverOptions)
+			if !ok {
+				return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val)
+			}
+			options[opt] = append(existingOpts, serverOpts)
+			continue
+		}
+		// Additionally, fold multiple "log" options together into an
+		// array so that multiple loggers can be configured.
+		if opt == "log" {
+			existingOpts, ok := options[opt].([]ConfigValue)
+			if !ok {
+				existingOpts = []ConfigValue{}
+			}
+			logOpts, ok := val.([]ConfigValue)
+			if !ok {
+				return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val)
+			}
+			options[opt] = append(existingOpts, logOpts...)
+			continue
+		}
+		// Also fold multiple "default_bind" options together into an
+		// array so that server blocks can have multiple binds by default.
+		if opt == "default_bind" {
+			existingOpts, ok := options[opt].([]ConfigValue)
+			if !ok {
+				existingOpts = []ConfigValue{}
+			}
+			defaultBindOpts, ok := val.([]ConfigValue)
+			if !ok {
+				return nil, fmt.Errorf("unexpected type from 'default_bind' global options: %T", val)
+			}
+			options[opt] = append(existingOpts, defaultBindOpts...)
+			continue
+		}
+
+		options[opt] = val
+	}
+
+	// If we got "servers" options, we'll sort them by their listener address
+	// (longest first, so more specific addresses are matched before shorter ones)
+	if serverOpts, ok := options["servers"].([]serverOptions); ok {
+		sort.Slice(serverOpts, func(i, j int) bool {
+			return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress)
+		})
+
+		// Reject the config if there are duplicate listener address
+		seen := make(map[string]bool)
+		for _, entry := range serverOpts {
+			if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen {
+				return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress)
+			}
+			seen[entry.ListenerAddress] = true
+		}
+	}
+
+	// drop the global options block from the list of server blocks
+	return serverBlocks[1:], nil
+}
+
+// extractNamedRoutes pulls out any named route server blocks
+// so they don't get parsed as sites, and stores them in options
+// (under "named_routes", keyed by the route's name) for later.
+// It returns the remaining server blocks with the named-route
+// blocks spliced out. Each named route's segments are parsed as
+// a single subroute handler.
+func (ServerType) extractNamedRoutes(
+	serverBlocks []serverBlock,
+	options map[string]any,
+	warnings *[]caddyconfig.Warning,
+	replacer ShorthandReplacer,
+) ([]serverBlock, error) {
+	namedRoutes := map[string]*caddyhttp.Route{}
+
+	gc := counter{new(int)}
+	state := make(map[string]any)
+
+	// copy the server blocks so we can
+	// splice out the named route ones
+	filtered := append([]serverBlock{}, serverBlocks...)
+	// index tracks our position in filtered, which shrinks as we splice;
+	// it is decremented after each removal so iteration stays aligned
+	index := -1
+
+	for _, sb := range serverBlocks {
+		index++
+		if !sb.block.IsNamedRoute {
+			continue
+		}
+
+		// splice out this block, because we know it's not a real server
+		filtered = append(filtered[:index], filtered[index+1:]...)
+		index--
+
+		if len(sb.block.Segments) == 0 {
+			continue
+		}
+
+		wholeSegment := caddyfile.Segment{}
+		for i := range sb.block.Segments {
+			// replace user-defined placeholder shorthands in extracted named routes
+			replacer.ApplyToSegment(&sb.block.Segments[i])
+
+			// zip up all the segments since ParseSegmentAsSubroute
+			// was designed to take a directive+
+			wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
+		}
+
+		h := Helper{
+			Dispenser:    caddyfile.NewDispenser(wholeSegment),
+			options:      options,
+			warnings:     warnings,
+			matcherDefs:  nil,
+			parentBlock:  sb.block,
+			groupCounter: gc,
+			State:        state,
+		}
+
+		handler, err := ParseSegmentAsSubroute(h)
+		if err != nil {
+			return nil, err
+		}
+		subroute := handler.(*caddyhttp.Subroute)
+		route := caddyhttp.Route{}
+
+		if len(subroute.Routes) == 1 && len(subroute.Routes[0].MatcherSetsRaw) == 0 {
+			// if there's only one route with no matcher, then we can simplify
+			route.HandlersRaw = append(route.HandlersRaw, subroute.Routes[0].HandlersRaw[0])
+		} else {
+			// otherwise we need the whole subroute
+			route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
+		}
+
+		// the first key of a named-route block is its name
+		namedRoutes[sb.block.GetKeysText()[0]] = &route
+	}
+	options["named_routes"] = namedRoutes
+
+	return filtered, nil
+}
+
+// serversFromPairings creates the servers for each pairing of addresses
+// to server blocks. Each pairing is essentially a server definition.
+// For every pairing it builds a *caddyhttp.Server: sorting site blocks
+// by specificity, wiring routes, TLS connection policies, auto-HTTPS
+// behavior, error routes, and access-log associations. The returned map
+// is keyed "srv0", "srv1", ... in pairing order, and global server
+// options are applied before returning.
+func (st *ServerType) serversFromPairings(
+	pairings []sbAddrAssociation,
+	options map[string]any,
+	warnings *[]caddyconfig.Warning,
+	groupCounter counter,
+) (map[string]*caddyhttp.Server, error) {
+	servers := make(map[string]*caddyhttp.Server)
+	defaultSNI := tryString(options["default_sni"], warnings)
+	fallbackSNI := tryString(options["fallback_sni"], warnings)
+
+	httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
+	if hp, ok := options["http_port"].(int); ok {
+		httpPort = strconv.Itoa(hp)
+	}
+	httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
+	if hsp, ok := options["https_port"].(int); ok {
+		httpsPort = strconv.Itoa(hsp)
+	}
+	autoHTTPS := []string{}
+	if ah, ok := options["auto_https"].([]string); ok {
+		autoHTTPS = ah
+	}
+
+	for i, p := range pairings {
+		// detect ambiguous site definitions: server blocks which
+		// have the same host bound to the same interface (listener
+		// address), otherwise their routes will improperly be added
+		// to the same server (see issue #4635)
+		for j, sblock1 := range p.serverBlocks {
+			for _, key := range sblock1.block.GetKeysText() {
+				for k, sblock2 := range p.serverBlocks {
+					if k == j {
+						continue
+					}
+					if slices.Contains(sblock2.block.GetKeysText(), key) {
+						return nil, fmt.Errorf("ambiguous site definition: %s", key)
+					}
+				}
+			}
+		}
+
+		var (
+			addresses []string
+			protocols [][]string
+		)
+
+		for _, addressWithProtocols := range p.addressesWithProtocols {
+			addresses = append(addresses, addressWithProtocols.address)
+			protocols = append(protocols, addressWithProtocols.protocols)
+		}
+
+		srv := &caddyhttp.Server{
+			Listen:          addresses,
+			ListenProtocols: protocols,
+		}
+
+		// remove srv.ListenProtocols[j] if it only contains the default protocols
+		for j, lnProtocols := range srv.ListenProtocols {
+			srv.ListenProtocols[j] = nil
+			for _, lnProtocol := range lnProtocols {
+				if lnProtocol != "" {
+					srv.ListenProtocols[j] = lnProtocols
+					break
+				}
+			}
+		}
+
+		// remove srv.ListenProtocols if it only contains the default protocols for all listen addresses
+		listenProtocols := srv.ListenProtocols
+		srv.ListenProtocols = nil
+		for _, lnProtocols := range listenProtocols {
+			if lnProtocols != nil {
+				srv.ListenProtocols = listenProtocols
+				break
+			}
+		}
+
+		// handle the auto_https global option
+		for _, val := range autoHTTPS {
+			switch val {
+			case "off":
+				if srv.AutoHTTPS == nil {
+					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+				}
+				srv.AutoHTTPS.Disabled = true
+
+			case "disable_redirects":
+				if srv.AutoHTTPS == nil {
+					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+				}
+				srv.AutoHTTPS.DisableRedir = true
+
+			case "disable_certs":
+				if srv.AutoHTTPS == nil {
+					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+				}
+				srv.AutoHTTPS.DisableCerts = true
+
+			case "ignore_loaded_certs":
+				if srv.AutoHTTPS == nil {
+					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+				}
+				srv.AutoHTTPS.IgnoreLoadedCerts = true
+
+			case "prefer_wildcard":
+				if srv.AutoHTTPS == nil {
+					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+				}
+				srv.AutoHTTPS.PreferWildcard = true
+			}
+		}
+
+		// Using paths in site addresses is deprecated
+		// See ParseAddress() where parsing should later reject paths
+		// See https://github.com/caddyserver/caddy/pull/4728 for a full explanation
+		for _, sblock := range p.serverBlocks {
+			for _, addr := range sblock.parsedKeys {
+				if addr.Path != "" {
+					caddy.Log().Named("caddyfile").Warn("Using a path in a site address is deprecated; please use the 'handle' directive instead", zap.String("address", addr.String()))
+				}
+			}
+		}
+
+		// sort server blocks by their keys; this is important because
+		// only the first matching site should be evaluated, and we should
+		// attempt to match most specific site first (host and path), in
+		// case their matchers overlap; we do this somewhat naively by
+		// descending sort by length of host then path
+		sort.SliceStable(p.serverBlocks, func(i, j int) bool {
+			// TODO: we could pre-process the specificities for efficiency,
+			// but I don't expect many blocks will have THAT many keys...
+			var iLongestPath, jLongestPath string
+			var iLongestHost, jLongestHost string
+			var iWildcardHost, jWildcardHost bool
+			for _, addr := range p.serverBlocks[i].parsedKeys {
+				if strings.Contains(addr.Host, "*") || addr.Host == "" {
+					iWildcardHost = true
+				}
+				if specificity(addr.Host) > specificity(iLongestHost) {
+					iLongestHost = addr.Host
+				}
+				if specificity(addr.Path) > specificity(iLongestPath) {
+					iLongestPath = addr.Path
+				}
+			}
+			for _, addr := range p.serverBlocks[j].parsedKeys {
+				if strings.Contains(addr.Host, "*") || addr.Host == "" {
+					jWildcardHost = true
+				}
+				if specificity(addr.Host) > specificity(jLongestHost) {
+					jLongestHost = addr.Host
+				}
+				if specificity(addr.Path) > specificity(jLongestPath) {
+					jLongestPath = addr.Path
+				}
+			}
+			// catch-all blocks (blocks with no hostname) should always go
+			// last, even after blocks with wildcard hosts
+			if specificity(iLongestHost) == 0 {
+				return false
+			}
+			if specificity(jLongestHost) == 0 {
+				return true
+			}
+			if iWildcardHost != jWildcardHost {
+				// site blocks that have a key with a wildcard in the hostname
+				// must always be less specific than blocks without one; see
+				// https://github.com/caddyserver/caddy/issues/3410
+				return jWildcardHost && !iWildcardHost
+			}
+			if specificity(iLongestHost) == specificity(jLongestHost) {
+				return len(iLongestPath) > len(jLongestPath)
+			}
+			return specificity(iLongestHost) > specificity(jLongestHost)
+		})
+
+		// collect all hosts that have a wildcard in them; used below for
+		// the 'prefer_wildcard' auto-HTTPS mode to skip certs for names
+		// already covered by a wildcard
+		wildcardHosts := []string{}
+		for _, sblock := range p.serverBlocks {
+			for _, addr := range sblock.parsedKeys {
+				if strings.HasPrefix(addr.Host, "*.") {
+					wildcardHosts = append(wildcardHosts, addr.Host[2:])
+				}
+			}
+		}
+
+		var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
+		autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled
+
+		// if needed, the ServerLogConfig is initialized beforehand so
+		// that all server blocks can populate it with data, even when not
+		// coming with a log directive
+		for _, sblock := range p.serverBlocks {
+			if len(sblock.pile["custom_log"]) != 0 {
+				srv.Logs = new(caddyhttp.ServerLogConfig)
+				break
+			}
+		}
+
+		// add named routes to the server if 'invoke' was used inside of it
+		configuredNamedRoutes := options["named_routes"].(map[string]*caddyhttp.Route)
+		for _, sblock := range p.serverBlocks {
+			if len(sblock.pile[namedRouteKey]) == 0 {
+				continue
+			}
+			for _, value := range sblock.pile[namedRouteKey] {
+				if srv.NamedRoutes == nil {
+					srv.NamedRoutes = map[string]*caddyhttp.Route{}
+				}
+				name := value.Value.(string)
+				if configuredNamedRoutes[name] == nil {
+					return nil, fmt.Errorf("cannot invoke named route '%s', which was not defined", name)
+				}
+				srv.NamedRoutes[name] = configuredNamedRoutes[name]
+			}
+		}
+
+		// create a subroute for each site in the server block
+		for _, sblock := range p.serverBlocks {
+			matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
+			if err != nil {
+				return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
+			}
+
+			hosts := sblock.hostsFromKeys(false)
+
+			// emit warnings if user put unspecified IP addresses; they probably want the bind directive
+			for _, h := range hosts {
+				if h == "0.0.0.0" || h == "::" {
+					caddy.Log().Named("caddyfile").Warn("Site block has an unspecified IP address which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", zap.String("address", h))
+				}
+			}
+
+			// collect hosts that are forced to be automated
+			forceAutomatedNames := make(map[string]struct{})
+			if _, ok := sblock.pile["tls.force_automate"]; ok {
+				for _, host := range hosts {
+					forceAutomatedNames[host] = struct{}{}
+				}
+			}
+
+			// tls: connection policies
+			if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
+				// tls connection policies
+				for _, cpVal := range cpVals {
+					cp := cpVal.Value.(*caddytls.ConnectionPolicy)
+
+					// make sure the policy covers all hostnames from the block;
+					// an empty-string host is appended so the policy also matches
+					// handshakes with no SNI when this block serves the default
+					// or fallback SNI
+					for _, h := range hosts {
+						if h == defaultSNI {
+							hosts = append(hosts, "")
+							cp.DefaultSNI = defaultSNI
+							break
+						}
+						if h == fallbackSNI {
+							hosts = append(hosts, "")
+							cp.FallbackSNI = fallbackSNI
+							break
+						}
+					}
+
+					if len(hosts) > 0 {
+						slices.Sort(hosts) // for deterministic JSON output
+						cp.MatchersRaw = caddy.ModuleMap{
+							"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
+						}
+					} else {
+						cp.DefaultSNI = defaultSNI
+						cp.FallbackSNI = fallbackSNI
+					}
+
+					// only append this policy if it actually changes something
+					if !cp.SettingsEmpty() || mapContains(forceAutomatedNames, hosts) {
+						srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
+						// a policy with no hosts matches all handshakes, i.e. catch-all
+						hasCatchAllTLSConnPolicy = len(hosts) == 0
+					}
+				}
+			}
+
+			for _, addr := range sblock.parsedKeys {
+				// if server only uses HTTP port, auto-HTTPS will not apply
+				if listenersUseAnyPortOtherThan(srv.Listen, httpPort) {
+					// exclude any hosts that were defined explicitly with "http://"
+					// in the key from automated cert management (issue #2998)
+					if addr.Scheme == "http" && addr.Host != "" {
+						if srv.AutoHTTPS == nil {
+							srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+						}
+						if !slices.Contains(srv.AutoHTTPS.Skip, addr.Host) {
+							srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
+						}
+					}
+				}
+
+				// If TLS is specified as directive, it will also result in 1 or more connection policy being created
+				// Thus, catch-all address with non-standard port, e.g. :8443, can have TLS enabled without
+				// specifying prefix "https://"
+				// Second part of the condition is to allow creating TLS conn policy even though `auto_https` has been disabled
+				// ensuring compatibility with behavior described in below link
+				// https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
+				createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
+				hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
+					(addr.Host != "" && srv.AutoHTTPS != nil && !slices.Contains(srv.AutoHTTPS.Skip, addr.Host))
+
+				// we'll need to remember if the address qualifies for auto-HTTPS, so we
+				// can add a TLS conn policy if necessary
+				if addr.Scheme == "https" ||
+					(addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
+					addressQualifiesForTLS = true
+				}
+
+				// If prefer wildcard is enabled, then we add hosts that are
+				// already covered by the wildcard to the skip list
+				if addressQualifiesForTLS && srv.AutoHTTPS != nil && srv.AutoHTTPS.PreferWildcard {
+					// strip the leftmost label to find the wildcard base domain
+					baseDomain := addr.Host
+					if idx := strings.Index(baseDomain, "."); idx != -1 {
+						baseDomain = baseDomain[idx+1:]
+					}
+					if !strings.HasPrefix(addr.Host, "*.") && slices.Contains(wildcardHosts, baseDomain) {
+						srv.AutoHTTPS.SkipCerts = append(srv.AutoHTTPS.SkipCerts, addr.Host)
+					}
+				}
+
+				// predict whether auto-HTTPS will add the conn policy for us; if so, we
+				// may not need to add one for this server
+				autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
+					(addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
+			}
+
+			// Look for any config values that provide listener wrappers on the server block
+			for _, listenerConfig := range sblock.pile["listener_wrapper"] {
+				listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper)
+				if !ok {
+					return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper")
+				}
+				jsonListenerWrapper := caddyconfig.JSONModuleObject(
+					listenerWrapper,
+					"wrapper",
+					listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
+					warnings)
+				srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper)
+			}
+
+			// set up each handler directive, making sure to honor directive order
+			dirRoutes := sblock.pile["route"]
+			siteSubroute, err := buildSubroute(dirRoutes, groupCounter, true)
+			if err != nil {
+				return nil, err
+			}
+
+			// add the site block's route(s) to the server
+			srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)
+
+			// if error routes are defined, add those too
+			if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
+				if srv.Errors == nil {
+					srv.Errors = new(caddyhttp.HTTPErrorConfig)
+				}
+				// stable-sort so subroutes with matchers come before those without
+				sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
+					sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
+					if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
+						return false
+					}
+					return true
+				})
+				errorsSubroute := &caddyhttp.Subroute{}
+				for _, val := range errorSubrouteVals {
+					sr := val.Value.(*caddyhttp.Subroute)
+					errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
+				}
+				srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
+			}
+
+			// add log associations
+			// see https://github.com/caddyserver/caddy/issues/3310
+			sblockLogHosts := sblock.hostsFromKeys(true)
+			for _, cval := range sblock.pile["custom_log"] {
+				ncl := cval.Value.(namedCustomLog)
+
+				// if `no_hostname` is set, then this logger will not
+				// be associated with any of the site block's hostnames,
+				// and only be usable via the `log_name` directive
+				// or the `access_logger_names` variable
+				if ncl.noHostname {
+					continue
+				}
+
+				if sblock.hasHostCatchAllKey() && len(ncl.hostnames) == 0 {
+					// all requests for hosts not able to be listed should use
+					// this log because it's a catch-all-hosts server block
+					srv.Logs.DefaultLoggerName = ncl.name
+				} else if len(ncl.hostnames) > 0 {
+					// if the logger overrides the hostnames, map that to the logger name
+					for _, h := range ncl.hostnames {
+						if srv.Logs.LoggerNames == nil {
+							srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
+						}
+						srv.Logs.LoggerNames[h] = append(srv.Logs.LoggerNames[h], ncl.name)
+					}
+				} else {
+					// otherwise, map each host to the logger name
+					for _, h := range sblockLogHosts {
+						// strip the port from the host, if any
+						host, _, err := net.SplitHostPort(h)
+						if err != nil {
+							// no port present; use the host as-is
+							host = h
+						}
+						if srv.Logs.LoggerNames == nil {
+							srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
+						}
+						srv.Logs.LoggerNames[host] = append(srv.Logs.LoggerNames[host], ncl.name)
+					}
+				}
+			}
+			if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 {
+				// server has access logs enabled, but this server block does not
+				// enable access logs; therefore, all hosts of this server block
+				// should not be access-logged
+				if len(hosts) == 0 {
+					// if the server block has a catch-all-hosts key, then we should
+					// not log reqs to any host unless it appears in the map
+					srv.Logs.SkipUnmappedHosts = true
+				}
+				srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...)
+			}
+		}
+
+		// sort for deterministic JSON output
+		if srv.Logs != nil {
+			slices.Sort(srv.Logs.SkipHosts)
+		}
+
+		// a server cannot (natively) serve both HTTP and HTTPS at the
+		// same time, so make sure the configuration isn't in conflict
+		err := detectConflictingSchemes(srv, p.serverBlocks, options)
+		if err != nil {
+			return nil, err
+		}
+
+		// a catch-all TLS conn policy is necessary to ensure TLS can
+		// be offered to all hostnames of the server; even though only
+		// one policy is needed to enable TLS for the server, that
+		// policy might apply to only certain TLS handshakes; but when
+		// using the Caddyfile, user would expect all handshakes to at
+		// least have a matching connection policy, so here we append a
+		// catch-all/default policy if there isn't one already (it's
+		// important that it goes at the end) - see issue #3004:
+		// https://github.com/caddyserver/caddy/issues/3004
+		// TODO: maybe a smarter way to handle this might be to just make the
+		// auto-HTTPS logic at provision-time detect if there is any connection
+		// policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
+		if addressQualifiesForTLS &&
+			!hasCatchAllTLSConnPolicy &&
+			(len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "" || fallbackSNI != "") {
+			srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{
+				DefaultSNI:  defaultSNI,
+				FallbackSNI: fallbackSNI,
+			})
+		}
+
+		// tidy things up a bit
+		srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies)
+		if err != nil {
+			return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err)
+		}
+		srv.Routes = consolidateRoutes(srv.Routes)
+
+		servers[fmt.Sprintf("srv%d", i)] = srv
+	}
+
+	if err := applyServerOptions(servers, options, warnings); err != nil {
+		return nil, fmt.Errorf("applying global server options: %v", err)
+	}
+
+	return servers, nil
+}
+
+func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]any) error {
+ httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
+ if hp, ok := options["http_port"].(int); ok {
+ httpPort = strconv.Itoa(hp)
+ }
+ httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
+ if hsp, ok := options["https_port"].(int); ok {
+ httpsPort = strconv.Itoa(hsp)
+ }
+
+ var httpOrHTTPS string
+ checkAndSetHTTP := func(addr Address) error {
+ if httpOrHTTPS == "HTTPS" {
+ errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s",
+ srv.Listen, addr.Original)
+ if addr.Scheme == "" && addr.Host == "" {
+ errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg)
+ }
+ return errMsg
+ }
+ if len(srv.TLSConnPolicies) > 0 {
+ // any connection policies created for an HTTP server
+ // is a logical conflict, as it would enable HTTPS
+ return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen)
+ }
+ httpOrHTTPS = "HTTP"
+ return nil
+ }
+ checkAndSetHTTPS := func(addr Address) error {
+ if httpOrHTTPS == "HTTP" {
+ return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: %s",
+ srv.Listen, addr.Original)
+ }
+ httpOrHTTPS = "HTTPS"
+ return nil
+ }
+
+ for _, sblock := range serverBlocks {
+ for _, addr := range sblock.parsedKeys {
+ if addr.Scheme == "http" || addr.Port == httpPort {
+ if err := checkAndSetHTTP(addr); err != nil {
+ return err
+ }
+ } else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 {
+ if err := checkAndSetHTTPS(addr); err != nil {
+ return err
+ }
+ } else if addr.Host == "" {
+ if err := checkAndSetHTTP(addr); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection
+// policies, and combines equivalent ones for a cleaner overall output.
+func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) {
+ // catch-all policies (those without any matcher) should be at the
+ // end, otherwise it nullifies any more specific policies
+ sort.SliceStable(cps, func(i, j int) bool {
+ return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil
+ })
+
+ for i := 0; i < len(cps); i++ {
+ // compare it to the others
+ for j := 0; j < len(cps); j++ {
+ if j == i {
+ continue
+ }
+
+ // if they're exactly equal in every way, just keep one of them
+ if reflect.DeepEqual(cps[i], cps[j]) {
+ cps = append(cps[:j], cps[j+1:]...)
+ i--
+ break
+ }
+
+ // if they have the same matcher, try to reconcile each field: either they must
+ // be identical, or we have to be able to combine them safely
+ if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {
+ if len(cps[i].ALPN) > 0 &&
+ len(cps[j].ALPN) > 0 &&
+ !reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v",
+ cps[i].ALPN, cps[j].ALPN)
+ }
+ if len(cps[i].CipherSuites) > 0 &&
+ len(cps[j].CipherSuites) > 0 &&
+ !reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v",
+ cps[i].CipherSuites, cps[j].CipherSuites)
+ }
+ if cps[i].ClientAuthentication == nil &&
+ cps[j].ClientAuthentication != nil &&
+ !reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v",
+ cps[i].ClientAuthentication, cps[j].ClientAuthentication)
+ }
+ if len(cps[i].Curves) > 0 &&
+ len(cps[j].Curves) > 0 &&
+ !reflect.DeepEqual(cps[i].Curves, cps[j].Curves) {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v",
+ cps[i].Curves, cps[j].Curves)
+ }
+ if cps[i].DefaultSNI != "" &&
+ cps[j].DefaultSNI != "" &&
+ cps[i].DefaultSNI != cps[j].DefaultSNI {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. %s",
+ cps[i].DefaultSNI, cps[j].DefaultSNI)
+ }
+ if cps[i].ProtocolMin != "" &&
+ cps[j].ProtocolMin != "" &&
+ cps[i].ProtocolMin != cps[j].ProtocolMin {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s",
+ cps[i].ProtocolMin, cps[j].ProtocolMin)
+ }
+ if cps[i].ProtocolMax != "" &&
+ cps[j].ProtocolMax != "" &&
+ cps[i].ProtocolMax != cps[j].ProtocolMax {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. %s",
+ cps[i].ProtocolMax, cps[j].ProtocolMax)
+ }
+ if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
+ // merging fields other than AnyTag is not implemented
+ if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) ||
+ !reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) ||
+ cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm ||
+ !reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) {
+ return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. %+v",
+ cps[i].CertSelection, cps[j].CertSelection)
+ }
+ }
+
+ // by now we've decided that we can merge the two -- we'll keep i and drop j
+
+ if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 {
+ cps[i].ALPN = cps[j].ALPN
+ }
+ if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 {
+ cps[i].CipherSuites = cps[j].CipherSuites
+ }
+ if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil {
+ cps[i].ClientAuthentication = cps[j].ClientAuthentication
+ }
+ if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 {
+ cps[i].Curves = cps[j].Curves
+ }
+ if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" {
+ cps[i].DefaultSNI = cps[j].DefaultSNI
+ }
+ if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" {
+ cps[i].ProtocolMin = cps[j].ProtocolMin
+ }
+ if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" {
+ cps[i].ProtocolMax = cps[j].ProtocolMax
+ }
+
+ if cps[i].CertSelection == nil && cps[j].CertSelection != nil {
+ // if j is the only one with a policy, move it over to i
+ cps[i].CertSelection = cps[j].CertSelection
+ } else if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
+ // if both have one, then combine AnyTag
+ for _, tag := range cps[j].CertSelection.AnyTag {
+ if !slices.Contains(cps[i].CertSelection.AnyTag, tag) {
+ cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag)
+ }
+ }
+ }
+
+ cps = append(cps[:j], cps[j+1:]...)
+ i--
+ break
+ }
+ }
+ }
+ return cps, nil
+}
+
+// appendSubrouteToRouteList appends the routes in subroute
+// to the routeList, optionally qualified by matchers.
+func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
+ subroute *caddyhttp.Subroute,
+ matcherSetsEnc []caddy.ModuleMap,
+ p sbAddrAssociation,
+ warnings *[]caddyconfig.Warning,
+) caddyhttp.RouteList {
+ // nothing to do if... there's nothing to do
+ if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil {
+ return routeList
+ }
+
+ // No need to wrap the handlers in a subroute if this is the only server block
+ // and there is no matcher for it (doing so would produce unnecessarily nested
+ // JSON), *unless* there is a host matcher within this site block; if so, then
+ // we still need to wrap in a subroute because otherwise the host matcher from
+ // the inside of the site block would be a top-level host matcher, which is
+ // subject to auto-HTTPS (cert management), and using a host matcher within
+ // a site block is a valid, common pattern for excluding domains from cert
+ // management, leading to unexpected behavior; see issue #5124.
+ wrapInSubroute := true
+ if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
+ var hasHostMatcher bool
+ outer:
+ for _, route := range subroute.Routes {
+ for _, ms := range route.MatcherSetsRaw {
+ for matcherName := range ms {
+ if matcherName == "host" {
+ hasHostMatcher = true
+ break outer
+ }
+ }
+ }
+ }
+ wrapInSubroute = hasHostMatcher
+ }
+
+ if wrapInSubroute {
+ route := caddyhttp.Route{
+ // the semantics of a site block in the Caddyfile dictate
+ // that only the first matching one is evaluated, since
+ // site blocks do not cascade nor inherit
+ Terminal: true,
+ }
+ if len(matcherSetsEnc) > 0 {
+ route.MatcherSetsRaw = matcherSetsEnc
+ }
+ if len(subroute.Routes) > 0 || subroute.Errors != nil {
+ route.HandlersRaw = []json.RawMessage{
+ caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
+ }
+ }
+ if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 {
+ routeList = append(routeList, route)
+ }
+ } else {
+ routeList = append(routeList, subroute.Routes...)
+ }
+
+ return routeList
+}
+
// buildSubroute turns the config values, which are expected to be routes
// into a clean and orderly subroute that has all the routes within it.
//
// When needsSorting is true, every value's directive must appear in the
// global directiveOrder, and the routes are sorted accordingly;
// groupCounter hands out unique group names used to make certain
// directives mutually exclusive (only the first match in a group runs).
func buildSubroute(routes []ConfigValue, groupCounter counter, needsSorting bool) (*caddyhttp.Subroute, error) {
	if needsSorting {
		// reject any directive with no defined order, since we would
		// not know where to place it in the sorted route list
		for _, val := range routes {
			if !slices.Contains(directiveOrder, val.directive) {
				return nil, fmt.Errorf("directive '%s' is not an ordered HTTP handler, so it cannot be used here - try placing within a route block or using the order global option", val.directive)
			}
		}

		sortRoutes(routes)
	}

	subroute := new(caddyhttp.Subroute)

	// some directives are mutually exclusive (only first matching
	// instance should be evaluated); this is done by putting their
	// routes in the same group
	// (the map holds POINTERS so the count/groupName set below are
	// visible when the same entry is looked up again in the second loop)
	mutuallyExclusiveDirs := map[string]*struct {
		count     int
		groupName string
	}{
		// as a special case, group rewrite directives so that they are mutually exclusive;
		// this means that only the first matching rewrite will be evaluated, and that's
		// probably a good thing, since there should never be a need to do more than one
		// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
		//     rewrite /docs/json/* /docs/json/index.html
		//     rewrite /docs/*      /docs/index.html
		// (We use this on the Caddy website, or at least we did once.) The first rewrite's
		// result is also matched by the second rewrite, making the first rewrite pointless.
		// See issue #2959.
		"rewrite": {},

		// handle blocks are also mutually exclusive by definition
		"handle": {},

		// root just sets a variable, so if it was not mutually exclusive, intersecting
		// root directives would overwrite previously-matched ones; they should not cascade
		"root": {},
	}

	// we need to deterministically loop over each of these directives
	// in order to keep the group numbers consistent
	keys := make([]string, 0, len(mutuallyExclusiveDirs))
	for k := range mutuallyExclusiveDirs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, meDir := range keys {
		info := mutuallyExclusiveDirs[meDir]

		// see how many instances of the directive there are
		// (we only care whether there is more than one, so stop early)
		for _, r := range routes {
			if r.directive == meDir {
				info.count++
				if info.count > 1 {
					break
				}
			}
		}
		// if there is more than one, put them in a group
		// (special case: "rewrite" directive must always be in
		// its own group--even if there is only one--because we
		// do not want a rewrite to be consolidated into other
		// adjacent routes that happen to have the same matcher,
		// see caddyserver/caddy#3108 - because the implied
		// intent of rewrite is to do an internal redirect,
		// we can't assume that the request will continue to
		// match the same matcher; anyway, giving a route a
		// unique group name should keep it from consolidating)
		if info.count > 1 || meDir == "rewrite" {
			info.groupName = groupCounter.nextGroup()
		}
	}

	// add all the routes piled in from directives
	for _, r := range routes {
		// put this route into a group if it is mutually exclusive
		// (groupName may be empty, which assigns no group)
		if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
			route := r.Value.(caddyhttp.Route)
			route.Group = info.groupName
			r.Value = route
		}

		switch route := r.Value.(type) {
		case caddyhttp.Subroute:
			// if a route-class config value is actually a Subroute handler
			// with nothing but a list of routes, then it is the intention
			// of the directive to keep these handlers together and in this
			// same order, but not necessarily in a subroute (if it wanted
			// to keep them in a subroute, the directive would have returned
			// a route with a Subroute as its handler); this is useful to
			// keep multiple handlers/routes together and in the same order
			// so that the sorting procedure we did above doesn't reorder them
			if route.Errors != nil {
				// if error handlers are also set, this is confusing; it's
				// probably supposed to be wrapped in a Route and encoded
				// as a regular handler route... programmer error.
				panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
			}
			subroute.Routes = append(subroute.Routes, route.Routes...)
		case caddyhttp.Route:
			subroute.Routes = append(subroute.Routes, route)
		}
	}

	subroute.Routes = consolidateRoutes(subroute.Routes)

	return subroute, nil
}
+
// normalizeDirectiveName ensures directives that should be sorted
// at the same level are named the same before sorting happens.
func normalizeDirectiveName(directive string) string {
	// As a special case, we want "handle_path" to be sorted
	// at the same level as "handle", so we force them to use
	// the same directive name after their parsing is complete.
	// See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377
	if directive == "handle_path" {
		return "handle"
	}
	return directive
}
+
+// consolidateRoutes combines routes with the same properties
+// (same matchers, same Terminal and Group settings) for a
+// cleaner overall output.
+func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
+ for i := 0; i < len(routes)-1; i++ {
+ if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
+ routes[i].Terminal == routes[i+1].Terminal &&
+ routes[i].Group == routes[i+1].Group {
+ // keep the handlers in the same order, then splice out repetitive route
+ routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
+ routes = append(routes[:i+1], routes[i+2:]...)
+ i--
+ }
+ }
+ return routes
+}
+
+func matcherSetFromMatcherToken(
+ tkn caddyfile.Token,
+ matcherDefs map[string]caddy.ModuleMap,
+ warnings *[]caddyconfig.Warning,
+) (caddy.ModuleMap, bool, error) {
+ // matcher tokens can be wildcards, simple path matchers,
+ // or refer to a pre-defined matcher by some name
+ if tkn.Text == "*" {
+ // match all requests == no matchers, so nothing to do
+ return nil, true, nil
+ }
+
+ // convenient way to specify a single path match
+ if strings.HasPrefix(tkn.Text, "/") {
+ return caddy.ModuleMap{
+ "path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
+ }, true, nil
+ }
+
+ // pre-defined matcher
+ if strings.HasPrefix(tkn.Text, matcherPrefix) {
+ m, ok := matcherDefs[tkn.Text]
+ if !ok {
+ return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
+ }
+ return m, true, nil
+ }
+
+ return nil, false, nil
+}
+
// compileEncodedMatcherSets builds the encoded matcher sets for a server
// block's keys: keys sharing the same path component share one host/path
// matcher pair, and each pair is then encoded into a caddy.ModuleMap for
// use as a route's matcher set.
func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) {
	// hostPathPair groups the host and path matchers that will be
	// emitted together as one matcher set
	type hostPathPair struct {
		hostm caddyhttp.MatchHost
		pathm caddyhttp.MatchPath
	}

	// keep routes with common host and path matchers together
	var matcherPairs []*hostPathPair

	var catchAllHosts bool
	for _, addr := range sblock.parsedKeys {
		// choose a matcher pair that should be shared by this
		// server block; if none exists yet, create one
		// (pairs are keyed by path: either both empty, or equal)
		var chosenMatcherPair *hostPathPair
		for _, mp := range matcherPairs {
			if (len(mp.pathm) == 0 && addr.Path == "") ||
				(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
				chosenMatcherPair = mp
				break
			}
		}
		if chosenMatcherPair == nil {
			chosenMatcherPair = new(hostPathPair)
			if addr.Path != "" {
				chosenMatcherPair.pathm = []string{addr.Path}
			}
			matcherPairs = append(matcherPairs, chosenMatcherPair)
		}

		// if one of the keys has no host (i.e. is a catch-all for
		// any hostname), then we need to null out the host matcher
		// entirely so that it matches all hosts
		if addr.Host == "" && !catchAllHosts {
			chosenMatcherPair.hostm = nil
			catchAllHosts = true
		}
		// once a catch-all host has been seen, skip adding further
		// hosts to any pair -- they would only narrow the match
		if catchAllHosts {
			continue
		}

		// add this server block's keys to the matcher
		// pair if it doesn't already exist
		if addr.Host != "" && !slices.Contains(chosenMatcherPair.hostm, addr.Host) {
			chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
		}
	}

	// iterate each pairing of host and path matchers and
	// put them into a map for JSON encoding
	var matcherSets []map[string]caddyhttp.RequestMatcherWithError
	for _, mp := range matcherPairs {
		matcherSet := make(map[string]caddyhttp.RequestMatcherWithError)
		if len(mp.hostm) > 0 {
			matcherSet["host"] = mp.hostm
		}
		if len(mp.pathm) > 0 {
			matcherSet["path"] = mp.pathm
		}
		// empty sets (catch-all pairs with no path) are omitted entirely
		if len(matcherSet) > 0 {
			matcherSets = append(matcherSets, matcherSet)
		}
	}

	// finally, encode each of the matcher sets
	matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets))
	for _, ms := range matcherSets {
		msEncoded, err := encodeMatcherSet(ms)
		if err != nil {
			return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err)
		}
		matcherSetsEnc = append(matcherSetsEnc, msEncoded)
	}

	return matcherSetsEnc, nil
}
+
// parseMatcherDefinitions parses one named matcher definition from d
// (e.g. "@name <matcher> <args...>" or "@name { ... }") and records the
// resulting matcher modules in matchers, keyed by the definition name
// (including its "@" prefix). Returns an error on duplicate names or
// if any matcher module fails to load or unmarshal.
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
	d.Next() // advance to the first token

	// this is the "name" for "named matchers"
	definitionName := d.Val()

	if _, ok := matchers[definitionName]; ok {
		return fmt.Errorf("matcher is defined more than once: %s", definitionName)
	}
	matchers[definitionName] = make(caddy.ModuleMap)

	// given a matcher name and the tokens following it, parse
	// the tokens as a matcher module and record it
	makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
		// create a new dispenser from the tokens
		dispenser := caddyfile.NewDispenser(tokens)

		// set the matcher name (without @) in the dispenser context so
		// that matcher modules can access it to use it as their name
		// (e.g. regexp matchers which use the name for capture groups)
		dispenser.SetContext(caddyfile.MatcherNameCtxKey, definitionName[1:])

		mod, err := caddy.GetModule("http.matchers." + matcherName)
		if err != nil {
			return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
		}
		unm, ok := mod.New().(caddyfile.Unmarshaler)
		if !ok {
			return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
		}
		err = unm.UnmarshalCaddyfile(dispenser)
		if err != nil {
			return err
		}

		// prefer the error-returning matcher interface; fall back to
		// the legacy interface for modules that have not migrated
		if rm, ok := unm.(caddyhttp.RequestMatcherWithError); ok {
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
			return nil
		}
		// nolint:staticcheck
		if rm, ok := unm.(caddyhttp.RequestMatcher); ok {
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
			return nil
		}
		return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
	}

	// if the next token is quoted, we can assume it's not a matcher name
	// and that it's probably an 'expression' matcher
	if d.NextArg() {
		if d.Token().Quoted() {
			// since it was missing the matcher name, we insert a token
			// in front of the expression token itself; we use Clone() to
			// make the new token to keep the same the import location as
			// the next token, if this is within a snippet or imported file.
			// see https://github.com/caddyserver/caddy/issues/6287
			expressionToken := d.Token().Clone()
			expressionToken.Text = "expression"
			err := makeMatcher("expression", []caddyfile.Token{expressionToken, d.Token()})
			if err != nil {
				return err
			}
			return nil
		}

		// if it wasn't quoted, then we need to rewind after calling
		// d.NextArg() so the below properly grabs the matcher name
		d.Prev()
	}

	// in case there are multiple instances of the same matcher, concatenate
	// their tokens (we expect that UnmarshalCaddyfile should be able to
	// handle more than one segment); otherwise, we'd overwrite other
	// instances of the matcher in this set
	tokensByMatcherName := make(map[string][]caddyfile.Token)
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		matcherName := d.Val()
		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
	}
	// NOTE(review): map iteration order is random, so matchers within one
	// set are constructed in nondeterministic order; presumably harmless
	// since each matcher name is independent -- confirm if deterministic
	// construction order ever matters here
	for matcherName, tokens := range tokensByMatcherName {
		err := makeMatcher(matcherName, tokens)
		if err != nil {
			return err
		}
	}
	return nil
}
+
+func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcherWithError) (caddy.ModuleMap, error) {
+ msEncoded := make(caddy.ModuleMap)
+ for matcherName, val := range matchers {
+ jsonBytes, err := json.Marshal(val)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
+ }
+ msEncoded[matcherName] = jsonBytes
+ }
+ return msEncoded, nil
+}
+
+// WasReplacedPlaceholderShorthand checks if a token string was
+// likely a replaced shorthand of the known Caddyfile placeholder
+// replacement outputs. Useful to prevent some user-defined map
+// output destinations from overlapping with one of the
+// predefined shorthands.
+func WasReplacedPlaceholderShorthand(token string) string {
+ prev := ""
+ for i, item := range placeholderShorthands() {
+ // only look at every 2nd item, which is the replacement
+ if i%2 == 0 {
+ prev = item
+ continue
+ }
+ if strings.Trim(token, "{}") == strings.Trim(item, "{}") {
+ // we return the original shorthand so it
+ // can be used for an error message
+ return prev
+ }
+ }
+ return ""
+}
+
+// tryInt tries to convert val to an integer. If it fails,
+// it downgrades the error to a warning and returns 0.
+func tryInt(val any, warnings *[]caddyconfig.Warning) int {
+ intVal, ok := val.(int)
+ if val != nil && !ok && warnings != nil {
+ *warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
+ }
+ return intVal
+}
+
+func tryString(val any, warnings *[]caddyconfig.Warning) string {
+ stringVal, ok := val.(string)
+ if val != nil && !ok && warnings != nil {
+ *warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
+ }
+ return stringVal
+}
+
+func tryDuration(val any, warnings *[]caddyconfig.Warning) caddy.Duration {
+ durationVal, ok := val.(caddy.Duration)
+ if val != nil && !ok && warnings != nil {
+ *warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
+ }
+ return durationVal
+}
+
+// listenersUseAnyPortOtherThan returns true if there are any
+// listeners in addresses that use a port which is not otherPort.
+// Mostly borrowed from unexported method in caddyhttp package.
+func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool {
+ otherPortInt, err := strconv.Atoi(otherPort)
+ if err != nil {
+ return false
+ }
+ for _, lnAddr := range addresses {
+ laddrs, err := caddy.ParseNetworkAddress(lnAddr)
+ if err != nil {
+ continue
+ }
+ if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort {
+ return true
+ }
+ }
+ return false
+}
+
// mapContains reports whether m contains at least one of the given keys.
// An empty map or empty key list trivially yields false.
func mapContains[K comparable, V any](m map[K]V, keys []K) bool {
	if len(m) > 0 {
		for _, candidate := range keys {
			if _, present := m[candidate]; present {
				return true
			}
		}
	}
	return false
}
+
// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
func specificity(s string) int {
	// start with the length, discounting every wildcard character
	score := len(s) - strings.Count(s, "*")

	// then discount each complete {...} placeholder span
	rest := s
	for len(rest) > 0 {
		open := strings.Index(rest, "{")
		if open < 0 {
			break
		}
		closing := strings.Index(rest[open:], "}") + open + 1
		if closing <= open {
			// no closing brace after the opening one; stop discounting
			break
		}
		score -= closing - open
		rest = rest[closing:]
	}
	return score
}
+
// counter hands out sequential group names. The count is held behind a
// pointer so that copies of a counter all advance the same sequence.
type counter struct {
	n *int
}

// nextGroup returns the next unique group name ("group0", "group1", ...)
// and advances the shared counter.
func (c counter) nextGroup() string {
	next := fmt.Sprintf("group%d", *c.n)
	*c.n++
	return next
}
+
// namedCustomLog associates a custom log configuration with the name it
// was defined under and the hostnames it applies to. When hostnames is
// non-empty, it overrides the server block's own hosts for logger-name
// mapping. (noHostname appears to mark a logger not tied to any
// hostname -- NOTE(review): confirm against the log directive parser,
// which is not visible here.)
type namedCustomLog struct {
	name       string
	hostnames  []string
	log        *caddy.CustomLog
	noHostname bool
}

// addressWithProtocols associates a listen address with
// the protocols to serve it with
type addressWithProtocols struct {
	address   string
	protocols []string
}

// sbAddrAssociation is a mapping from a list of
// addresses with protocols, and a list of server
// blocks that are served on those addresses.
type sbAddrAssociation struct {
	addressesWithProtocols []addressWithProtocols
	serverBlocks           []serverBlock
}

const (
	// matcherPrefix marks tokens that reference named matchers (e.g. "@post")
	matcherPrefix = "@"
	// namedRouteKey identifies named-route config values
	namedRouteKey = "named_route"
)

// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)
diff --git a/caddyconfig/httpcaddyfile/httptype_test.go b/caddyconfig/httpcaddyfile/httptype_test.go
new file mode 100644
index 00000000000..69f55501cae
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/httptype_test.go
@@ -0,0 +1,211 @@
+package httpcaddyfile
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+func TestMatcherSyntax(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expectError bool
+ }{
+ {
+ input: `http://localhost
+ @debug {
+ query showdebug=1
+ }
+ `,
+ expectError: false,
+ },
+ {
+ input: `http://localhost
+ @debug {
+ query bad format
+ }
+ `,
+ expectError: true,
+ },
+ {
+ input: `http://localhost
+ @debug {
+ not {
+ path /somepath*
+ }
+ }
+ `,
+ expectError: false,
+ },
+ {
+ input: `http://localhost
+ @debug {
+ not path /somepath*
+ }
+ `,
+ expectError: false,
+ },
+ {
+ input: `http://localhost
+ @debug not path /somepath*
+ `,
+ expectError: false,
+ },
+ {
+ input: `@matcher {
+ path /matcher-not-allowed/outside-of-site-block/*
+ }
+ http://localhost
+ `,
+ expectError: true,
+ },
+ } {
+
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if err != nil != tc.expectError {
+ t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+ continue
+ }
+ }
+}
+
+func TestSpecificity(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expect int
+ }{
+ {"", 0},
+ {"*", 0},
+ {"*.*", 1},
+ {"{placeholder}", 0},
+ {"/{placeholder}", 1},
+ {"foo", 3},
+ {"example.com", 11},
+ {"a.example.com", 13},
+ {"*.example.com", 12},
+ {"/foo", 4},
+ {"/foo*", 4},
+ {"{placeholder}.example.com", 12},
+ {"{placeholder.example.com", 24},
+ {"}.", 2},
+ {"}{", 2},
+ {"{}", 0},
+ {"{{{}}", 1},
+ } {
+ actual := specificity(tc.input)
+ if actual != tc.expect {
+ t.Errorf("Test %d (%s): Expected %d but got %d", i, tc.input, tc.expect, actual)
+ }
+ }
+}
+
+func TestGlobalOptions(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expectError bool
+ }{
+ {
+ input: `
+ {
+ email test@example.com
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin off
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin 127.0.0.1:2020
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin {
+ disabled false
+ }
+ }
+ :80
+ `,
+ expectError: true,
+ },
+ {
+ input: `
+ {
+ admin {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin 127.0.0.1:2020 {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: false,
+ },
+ {
+ input: `
+ {
+ admin 192.168.1.1:2020 127.0.0.1:2020 {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: true,
+ },
+ {
+ input: `
+ {
+ admin off {
+ enforce_origin
+ origins 192.168.1.1:2020 127.0.0.1:2020
+ }
+ }
+ :80
+ `,
+ expectError: true,
+ },
+ } {
+
+ adapter := caddyfile.Adapter{
+ ServerType: ServerType{},
+ }
+
+ _, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+ if err != nil != tc.expectError {
+ t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err)
+ continue
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/options.go b/caddyconfig/httpcaddyfile/options.go
new file mode 100644
index 00000000000..d4a42462435
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/options.go
@@ -0,0 +1,572 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "slices"
+ "strconv"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v3/acme"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+// init registers the parse handlers for all of the standard global
+// options that may appear in a Caddyfile's global options block.
+func init() {
+	RegisterGlobalOption("debug", parseOptTrue)
+	RegisterGlobalOption("http_port", parseOptHTTPPort)
+	RegisterGlobalOption("https_port", parseOptHTTPSPort)
+	RegisterGlobalOption("default_bind", parseOptDefaultBind)
+	RegisterGlobalOption("grace_period", parseOptDuration)
+	RegisterGlobalOption("shutdown_delay", parseOptDuration)
+	RegisterGlobalOption("default_sni", parseOptSingleString)
+	RegisterGlobalOption("fallback_sni", parseOptSingleString)
+	RegisterGlobalOption("order", parseOptOrder)
+	RegisterGlobalOption("storage", parseOptStorage)
+	RegisterGlobalOption("storage_check", parseStorageCheck)
+	RegisterGlobalOption("storage_clean_interval", parseStorageCleanInterval)
+	RegisterGlobalOption("renew_interval", parseOptDuration)
+	RegisterGlobalOption("ocsp_interval", parseOptDuration)
+	RegisterGlobalOption("acme_ca", parseOptSingleString)
+	RegisterGlobalOption("acme_ca_root", parseOptSingleString)
+	RegisterGlobalOption("acme_dns", parseOptACMEDNS)
+	RegisterGlobalOption("acme_eab", parseOptACMEEAB)
+	RegisterGlobalOption("cert_issuer", parseOptCertIssuer)
+	RegisterGlobalOption("skip_install_trust", parseOptTrue)
+	RegisterGlobalOption("email", parseOptSingleString)
+	RegisterGlobalOption("admin", parseOptAdmin)
+	RegisterGlobalOption("on_demand_tls", parseOptOnDemand)
+	RegisterGlobalOption("local_certs", parseOptTrue)
+	RegisterGlobalOption("key_type", parseOptSingleString)
+	RegisterGlobalOption("auto_https", parseOptAutoHTTPS)
+	RegisterGlobalOption("metrics", parseMetricsOptions)
+	RegisterGlobalOption("servers", parseServerOptions)
+	RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions)
+	RegisterGlobalOption("cert_lifetime", parseOptDuration)
+	RegisterGlobalOption("log", parseLogOptions)
+	RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
+	RegisterGlobalOption("persist_config", parseOptPersistConfig)
+}
+
+// parseOptTrue handles flag-style options (debug, local_certs,
+// skip_install_trust): their mere presence means the value is true.
+func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
+
+// parseOptHTTPPort parses the http_port global option, which must be
+// exactly one argument that converts to an integer port number.
+func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	var portStr string
+	if !d.AllArgs(&portStr) {
+		return 0, d.ArgErr()
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return 0, d.Errf("converting port '%s' to integer value: %v", portStr, err)
+	}
+	return port, nil
+}
+
+// parseOptHTTPSPort parses the https_port global option, which must be
+// exactly one argument that converts to an integer port number.
+func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	var portStr string
+	if !d.AllArgs(&portStr) {
+		return 0, d.ArgErr()
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return 0, d.Errf("converting port '%s' to integer value: %v", portStr, err)
+	}
+	return port, nil
+}
+
+// parseOptOrder parses the "order" global option, which repositions a
+// registered directive in the evaluation order. Syntax:
+//
+//	order <directive> first|last|before <other>|after <other>
+//
+// On success it updates the package-level directiveOrder and returns
+// the new order.
+func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+
+	// get directive name
+	if !d.Next() {
+		return nil, d.ArgErr()
+	}
+	dirName := d.Val()
+	if _, ok := registeredDirectives[dirName]; !ok {
+		return nil, d.Errf("%s is not a registered directive", dirName)
+	}
+
+	// get positional token
+	if !d.Next() {
+		return nil, d.ArgErr()
+	}
+	pos := Positional(d.Val())
+
+	// if directive already had an order, drop it; operate on a clone so
+	// that the global directiveOrder is not left half-mutated if we hit
+	// an error below (slices.DeleteFunc shifts elements of its argument's
+	// backing array in place, which directiveOrder would share)
+	newOrder := slices.DeleteFunc(slices.Clone(directiveOrder), func(d string) bool {
+		return d == dirName
+	})
+
+	// act on the positional; if it's First or Last, we're done right away
+	switch pos {
+	case First:
+		newOrder = append([]string{dirName}, newOrder...)
+		if d.NextArg() {
+			return nil, d.ArgErr()
+		}
+		directiveOrder = newOrder
+		return newOrder, nil
+
+	case Last:
+		newOrder = append(newOrder, dirName)
+		if d.NextArg() {
+			return nil, d.ArgErr()
+		}
+		directiveOrder = newOrder
+		return newOrder, nil
+
+	// if it's Before or After, continue
+	case Before:
+	case After:
+
+	default:
+		return nil, d.Errf("unknown positional '%s'", pos)
+	}
+
+	// get name of other directive
+	if !d.NextArg() {
+		return nil, d.ArgErr()
+	}
+	otherDir := d.Val()
+	if d.NextArg() {
+		return nil, d.ArgErr()
+	}
+
+	// get the position of the target directive
+	targetIndex := slices.Index(newOrder, otherDir)
+	if targetIndex == -1 {
+		return nil, d.Errf("directive '%s' not found", otherDir)
+	}
+	// if we're inserting after, we need to increment the index to go after
+	if pos == After {
+		targetIndex++
+	}
+	// insert the directive into the new order
+	newOrder = slices.Insert(newOrder, targetIndex, dirName)
+
+	directiveOrder = newOrder
+
+	return newOrder, nil
+}
+
+// parseOptStorage parses the storage global option, loading the named
+// storage module (caddy.storage.<name>) and requiring it to implement
+// caddy.StorageConverter.
+func parseOptStorage(d *caddyfile.Dispenser, _ any) (any, error) {
+	if !d.Next() { // consume option name
+		return nil, d.ArgErr()
+	}
+	if !d.Next() { // get storage module name
+		return nil, d.ArgErr()
+	}
+	modID := "caddy.storage." + d.Val()
+	unm, err := caddyfile.UnmarshalModule(d, modID)
+	if err != nil {
+		return nil, err
+	}
+	storage, ok := unm.(caddy.StorageConverter)
+	if !ok {
+		return nil, d.Errf("module %s is not a caddy.StorageConverter", modID)
+	}
+	return storage, nil
+}
+
+// parseStorageCheck parses the storage_check global option; the only
+// accepted value is "off".
+func parseStorageCheck(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	if !d.Next() {
+		return "", d.ArgErr()
+	}
+	value := d.Val()
+	if d.Next() {
+		return "", d.ArgErr()
+	}
+	if value == "off" {
+		return value, nil
+	}
+	return "", d.Errf("storage_check must be 'off'")
+}
+
+// parseStorageCleanInterval parses the storage_clean_interval global
+// option, whose single argument is either a duration or 'off' (which
+// is returned as false to disable cleaning).
+func parseStorageCleanInterval(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	if !d.Next() {
+		return "", d.ArgErr()
+	}
+	val := d.Val()
+	if d.Next() {
+		return "", d.ArgErr()
+	}
+	if val == "off" {
+		return false, nil
+	}
+	// parse the saved token rather than calling d.Val() again: the
+	// extra-argument check above has already probed the cursor, so
+	// relying on d.Val() here depends on dispenser internals
+	dur, err := caddy.ParseDuration(val)
+	if err != nil {
+		return nil, d.Errf("failed to parse storage_clean_interval, must be a duration or 'off': %w", err)
+	}
+	return caddy.Duration(dur), nil
+}
+
+// parseOptDuration parses any single-duration global option (e.g.
+// grace_period, renew_interval, cert_lifetime) into a caddy.Duration.
+func parseOptDuration(d *caddyfile.Dispenser, _ any) (any, error) {
+	if !d.Next() { // consume option name
+		return nil, d.ArgErr()
+	}
+	if !d.Next() { // get duration value
+		return nil, d.ArgErr()
+	}
+	dur, err := caddy.ParseDuration(d.Val())
+	if err != nil {
+		return nil, err
+	}
+	return caddy.Duration(dur), nil
+}
+
+// parseOptACMEDNS parses the acme_dns global option, loading the named
+// DNS provider module (dns.providers.<name>) and requiring it to
+// implement certmagic.DNSProvider.
+func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
+	if !d.Next() { // consume option name
+		return nil, d.ArgErr()
+	}
+	if !d.Next() { // get DNS module name
+		return nil, d.ArgErr()
+	}
+	modID := "dns.providers." + d.Val()
+	unm, err := caddyfile.UnmarshalModule(d, modID)
+	if err != nil {
+		return nil, err
+	}
+	prov, ok := unm.(certmagic.DNSProvider)
+	if !ok {
+		return nil, d.Errf("module %s (%T) is not a certmagic.DNSProvider", modID, unm)
+	}
+	return prov, nil
+}
+
+// parseOptACMEEAB parses the acme_eab global option (ACME external
+// account binding). It takes no inline arguments, only a block with
+// key_id and mac_key subdirectives.
+func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
+	eab := new(acme.EAB)
+	d.Next() // consume option name
+	if d.NextArg() {
+		return nil, d.ArgErr()
+	}
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "key_id":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			eab.KeyID = d.Val()
+
+		case "mac_key":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			eab.MACKey = d.Val()
+
+		default:
+			return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+		}
+	}
+	return eab, nil
+}
+
+// parseOptCertIssuer parses one cert_issuer global option. Because the
+// option may appear multiple times, issuers accumulate: any previously
+// parsed issuers are passed in via existing and appended to.
+func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
+	d.Next() // consume option name
+
+	var issuers []certmagic.Issuer
+	if existing != nil {
+		issuers = existing.([]certmagic.Issuer)
+	}
+
+	// get issuer module name
+	if !d.Next() {
+		return nil, d.ArgErr()
+	}
+	modID := "tls.issuance." + d.Val()
+	unm, err := caddyfile.UnmarshalModule(d, modID)
+	if err != nil {
+		return nil, err
+	}
+	iss, ok := unm.(certmagic.Issuer)
+	if !ok {
+		return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
+	}
+	issuers = append(issuers, iss)
+	return issuers, nil
+}
+
+// parseOptSingleString handles options that take exactly one string
+// argument (email, default_sni, acme_ca, key_type, ...).
+func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	if !d.Next() {
+		return "", d.ArgErr()
+	}
+	value := d.Val()
+	if d.Next() { // reject trailing tokens
+		return "", d.ArgErr()
+	}
+	return value, nil
+}
+
+// parseOptDefaultBind parses the default_bind global option. Syntax:
+//
+//	default_bind [<addresses...>] {
+//		protocols <protocols...>
+//	}
+//
+// With no addresses given, a single empty address is used so the
+// protocols still apply to the default listener.
+func parseOptDefaultBind(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+
+	var addresses, protocols []string
+	addresses = d.RemainingArgs()
+
+	if len(addresses) == 0 {
+		addresses = append(addresses, "")
+	}
+
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "protocols":
+			protocols = d.RemainingArgs()
+			if len(protocols) == 0 {
+				return nil, d.Errf("protocols requires one or more arguments")
+			}
+		default:
+			return nil, d.Errf("unknown subdirective: %s", d.Val())
+		}
+	}
+
+	return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{
+		addresses: addresses,
+		protocols: protocols,
+	}}}, nil
+}
+
+// parseOptAdmin parses the admin global option. Syntax:
+//
+//	admin [off|<listen address>] {
+//		enforce_origin
+//		origins <origins...>
+//	}
+//
+// "off" disables the admin endpoint entirely and permits no further
+// arguments or block; otherwise the default listen address is used
+// when none is given.
+func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+
+	adminCfg := new(caddy.AdminConfig)
+	if d.NextArg() {
+		listenAddress := d.Val()
+		if listenAddress == "off" {
+			adminCfg.Disabled = true
+			if d.Next() { // Do not accept any remaining options including block
+				return nil, d.Err("No more option is allowed after turning off admin config")
+			}
+		} else {
+			adminCfg.Listen = listenAddress
+			if d.NextArg() { // At most 1 arg is allowed
+				return nil, d.ArgErr()
+			}
+		}
+	}
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "enforce_origin":
+			adminCfg.EnforceOrigin = true
+
+		case "origins":
+			adminCfg.Origins = d.RemainingArgs()
+
+		default:
+			return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+		}
+	}
+	if adminCfg.Listen == "" && !adminCfg.Disabled {
+		adminCfg.Listen = caddy.DefaultAdminListen
+	}
+	return adminCfg, nil
+}
+
+// parseOptOnDemand parses the on_demand_tls global option, configuring
+// the permission module that approves on-demand certificate issuance:
+// either the shorthand "ask <endpoint>" (HTTP permission module) or an
+// explicit "permission <module>". The legacy rate-limiting options
+// (interval, burst) are rejected with a helpful error.
+func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	if d.NextArg() {
+		return nil, d.ArgErr()
+	}
+
+	var ond *caddytls.OnDemandConfig
+
+	for nesting := d.Nesting(); d.NextBlock(nesting); {
+		switch d.Val() {
+		case "ask":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			if ond == nil {
+				ond = new(caddytls.OnDemandConfig)
+			}
+			if ond.PermissionRaw != nil {
+				return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
+			}
+			perm := caddytls.PermissionByHTTP{Endpoint: d.Val()}
+			ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil)
+
+		case "permission":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			if ond == nil {
+				ond = new(caddytls.OnDemandConfig)
+			}
+			if ond.PermissionRaw != nil {
+				return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
+			}
+			modName := d.Val()
+			modID := "tls.permission." + modName
+			unm, err := caddyfile.UnmarshalModule(d, modID)
+			if err != nil {
+				return nil, err
+			}
+			perm, ok := unm.(caddytls.OnDemandPermission)
+			if !ok {
+				return nil, d.Errf("module %s (%T) is not an on-demand TLS permission module", modID, unm)
+			}
+			ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", modName, nil)
+
+		case "interval":
+			return nil, d.Errf("the on_demand_tls 'interval' option is no longer supported, remove it from your config")
+
+		case "burst":
+			return nil, d.Errf("the on_demand_tls 'burst' option is no longer supported, remove it from your config")
+
+		default:
+			return nil, d.Errf("unrecognized parameter '%s'", d.Val())
+		}
+	}
+	if ond == nil {
+		return nil, d.Err("expected at least one config parameter for on_demand_tls")
+	}
+	return ond, nil
+}
+
+// parseOptPersistConfig parses the persist_config global option; the
+// only accepted value is "off".
+func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	if !d.Next() {
+		return "", d.ArgErr()
+	}
+	value := d.Val()
+	if d.Next() {
+		return "", d.ArgErr()
+	}
+	if value == "off" {
+		return value, nil
+	}
+	return "", d.Errf("persist_config must be 'off'")
+}
+
+// parseOptAutoHTTPS parses the auto_https global option, which accepts
+// one or more of: off, disable_redirects, disable_certs,
+// ignore_loaded_certs, prefer_wildcard. The validated argument list is
+// returned as-is.
+func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	val := d.RemainingArgs()
+	if len(val) == 0 {
+		return "", d.ArgErr()
+	}
+	for _, v := range val {
+		switch v {
+		// Go switch cases do not fall through, so a single multi-value
+		// case replaces the previous chain of empty cases (which also
+		// carried a misleading, redundant `break`).
+		case "off", "disable_redirects", "disable_certs", "ignore_loaded_certs", "prefer_wildcard":
+			// valid mode; nothing to do
+		default:
+			return "", d.Errf("auto_https must be one of 'off', 'disable_redirects', 'disable_certs', 'ignore_loaded_certs', or 'prefer_wildcard'")
+		}
+	}
+	return val, nil
+}
+
+// unmarshalCaddyfileMetricsOptions parses the metrics global option
+// block into a caddyhttp.Metrics value. Currently only the per_host
+// subdirective is supported.
+func unmarshalCaddyfileMetricsOptions(d *caddyfile.Dispenser) (any, error) {
+	d.Next() // consume option name
+	metrics := new(caddyhttp.Metrics)
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "per_host":
+			metrics.PerHost = true
+		default:
+			// fixed copy-pasted message that said "servers option"
+			return nil, d.Errf("unrecognized metrics option '%s'", d.Val())
+		}
+	}
+	return metrics, nil
+}
+
+// parseMetricsOptions adapts unmarshalCaddyfileMetricsOptions to the
+// global-option handler signature (the existing value is unused).
+func parseMetricsOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+	return unmarshalCaddyfileMetricsOptions(d)
+}
+
+// parseServerOptions adapts unmarshalCaddyfileServerOptions to the
+// global-option handler signature (the existing value is unused).
+func parseServerOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+	return unmarshalCaddyfileServerOptions(d)
+}
+
+// parseOCSPStaplingOptions parses the ocsp_stapling global option; the
+// only accepted argument is "off", which disables stapling.
+func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	var arg string
+	if !d.AllArgs(&arg) {
+		return nil, d.ArgErr()
+	}
+	if arg != "off" {
+		return nil, d.Errf("invalid argument '%s'", arg)
+	}
+	// reaching this point guarantees arg == "off"
+	return certmagic.OCSPConfig{DisableStapling: true}, nil
+}
+
+// parseLogOptions parses the global log option. Syntax:
+//
+//	log [name] {
+//		output ...
+//		format ...
+//		level
+//		include
+//		exclude
+//	}
+//
+// When the name argument is unspecified, this directive modifies the default
+// logger. Because the option may appear multiple times, previously parsed
+// logger names are gathered from existingVal so duplicates can be detected.
+func parseLogOptions(d *caddyfile.Dispenser, existingVal any) (any, error) {
+	currentNames := make(map[string]struct{})
+	if existingVal != nil {
+		innerVals, ok := existingVal.([]ConfigValue)
+		if !ok {
+			return nil, d.Errf("existing log values of unexpected type: %T", existingVal)
+		}
+		for _, rawVal := range innerVals {
+			val, ok := rawVal.Value.(namedCustomLog)
+			if !ok {
+				return nil, d.Errf("existing log value of unexpected type: %T", existingVal)
+			}
+			currentNames[val.name] = struct{}{}
+		}
+	}
+
+	var warnings []caddyconfig.Warning
+	// Call out the same parser that handles server-specific log configuration.
+	configValues, err := parseLogHelper(
+		Helper{
+			Dispenser: d,
+			warnings:  &warnings,
+		},
+		currentNames,
+	)
+	if err != nil {
+		return nil, err
+	}
+	// in the global options context, warnings are treated as hard errors
+	if len(warnings) > 0 {
+		return nil, d.Errf("warnings found in parsing global log options: %+v", warnings)
+	}
+
+	return configValues, nil
+}
+
+// parseOptPreferredChains parses the preferred_chains global option by
+// delegating to the shared caddytls Caddyfile parser.
+func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) {
+	d.Next() // consume option name
+	return caddytls.ParseCaddyfilePreferredChainsOptions(d)
+}
diff --git a/caddyconfig/httpcaddyfile/options_test.go b/caddyconfig/httpcaddyfile/options_test.go
new file mode 100644
index 00000000000..bc9e8813404
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/options_test.go
@@ -0,0 +1,64 @@
+package httpcaddyfile
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ _ "github.com/caddyserver/caddy/v2/modules/logging"
+)
+
+// TestGlobalLogOptionSyntax checks syntax-level handling of the global
+// "log" option: a bare named logger adapts to empty config, duplicate
+// logger names are rejected, and extra positional arguments are errors.
+func TestGlobalLogOptionSyntax(t *testing.T) {
+	for i, tc := range []struct {
+		input       string
+		output      string
+		expectError bool
+	}{
+		// NOTE: Additional test cases of successful Caddyfile parsing
+		// are present in: caddytest/integration/caddyfile_adapt/
+		{
+			input: `{
+				log default
+			}
+			`,
+			output:      `{}`,
+			expectError: false,
+		},
+		{
+			input: `{
+				log example {
+					output file foo.log
+				}
+				log example {
+					format json
+				}
+			}
+			`,
+			expectError: true,
+		},
+		{
+			input: `{
+				log example /foo {
+					output file foo.log
+				}
+			}
+			`,
+			expectError: true,
+		},
+	} {
+
+		adapter := caddyfile.Adapter{
+			ServerType: ServerType{},
+		}
+
+		out, _, err := adapter.Adapt([]byte(tc.input), nil)
+
+		// (err != nil) must match the case's expectation exactly
+		if err != nil != tc.expectError {
+			t.Errorf("Test %d error expectation failed Expected: %v, got %v", i, tc.expectError, err)
+			continue
+		}
+
+		// on error cases out is nil, which stringifies to "" == tc.output
+		if string(out) != tc.output {
+			t.Errorf("Test %d error output mismatch Expected: %s, got %s", i, tc.output, out)
+		}
+	}
+}
diff --git a/caddyconfig/httpcaddyfile/pkiapp.go b/caddyconfig/httpcaddyfile/pkiapp.go
new file mode 100644
index 00000000000..c57263baf92
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/pkiapp.go
@@ -0,0 +1,229 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddypki"
+)
+
+// init registers the "pki" global option handler.
+func init() {
+	RegisterGlobalOption("pki", parsePKIApp)
+}
+
+// parsePKIApp parses the global pki option. Syntax:
+//
+//	pki {
+//		ca [] {
+//			name
+//			root_cn
+//			intermediate_cn
+//			intermediate_lifetime
+//			root {
+//				cert
+//				key
+//				format
+//			}
+//			intermediate {
+//				cert
+//				key
+//				format
+//			}
+//		}
+//	}
+//
+// When the CA ID is unspecified, 'local' is assumed.
+// NOTE(review): existingVal is ignored, so a second pki block replaces
+// (rather than merges with) an earlier one — confirm this is intended.
+func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
+	d.Next() // consume app name
+
+	pki := &caddypki.PKI{
+		CAs: make(map[string]*caddypki.CA),
+	}
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "ca":
+			pkiCa := new(caddypki.CA)
+			// optional single argument: the CA ID (defaults to 'local')
+			if d.NextArg() {
+				pkiCa.ID = d.Val()
+				if d.NextArg() {
+					return nil, d.ArgErr()
+				}
+			}
+			if pkiCa.ID == "" {
+				pkiCa.ID = caddypki.DefaultCAID
+			}
+
+			for nesting := d.Nesting(); d.NextBlock(nesting); {
+				switch d.Val() {
+				case "name":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					pkiCa.Name = d.Val()
+
+				case "root_cn":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					pkiCa.RootCommonName = d.Val()
+
+				case "intermediate_cn":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					pkiCa.IntermediateCommonName = d.Val()
+
+				case "intermediate_lifetime":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					dur, err := caddy.ParseDuration(d.Val())
+					if err != nil {
+						return nil, err
+					}
+					pkiCa.IntermediateLifetime = caddy.Duration(dur)
+
+				case "root":
+					if pkiCa.Root == nil {
+						pkiCa.Root = new(caddypki.KeyPair)
+					}
+					for nesting := d.Nesting(); d.NextBlock(nesting); {
+						switch d.Val() {
+						case "cert":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Root.Certificate = d.Val()
+
+						case "key":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Root.PrivateKey = d.Val()
+
+						case "format":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Root.Format = d.Val()
+
+						default:
+							return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
+						}
+					}
+
+				case "intermediate":
+					if pkiCa.Intermediate == nil {
+						pkiCa.Intermediate = new(caddypki.KeyPair)
+					}
+					for nesting := d.Nesting(); d.NextBlock(nesting); {
+						switch d.Val() {
+						case "cert":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Intermediate.Certificate = d.Val()
+
+						case "key":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Intermediate.PrivateKey = d.Val()
+
+						case "format":
+							if !d.NextArg() {
+								return nil, d.ArgErr()
+							}
+							pkiCa.Intermediate.Format = d.Val()
+
+						default:
+							return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
+						}
+					}
+
+				default:
+					return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
+				}
+			}
+
+			pki.CAs[pkiCa.ID] = pkiCa
+
+		default:
+			return nil, d.Errf("unrecognized pki option '%s'", d.Val())
+		}
+	}
+	return pki, nil
+}
+
+// buildPKIApp assembles the final PKI app config by merging the CAs
+// from the global pki option with CAs piled up by directives (such as
+// acme_server), honoring the skip_install_trust global option.
+func (st ServerType) buildPKIApp(
+	pairings []sbAddrAssociation,
+	options map[string]any,
+	warnings []caddyconfig.Warning,
+) (*caddypki.PKI, []caddyconfig.Warning, error) {
+	skipInstallTrust := false
+	if _, ok := options["skip_install_trust"]; ok {
+		skipInstallTrust = true
+	}
+	falseBool := false
+
+	// Load the PKI app configured via global options
+	var pkiApp *caddypki.PKI
+	unwrappedPki, ok := options["pki"].(*caddypki.PKI)
+	if ok {
+		pkiApp = unwrappedPki
+	} else {
+		pkiApp = &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
+	}
+	// CAs are pointers, so mutating ca.InstallTrust suffices; the map
+	// re-assignment below is a harmless no-op kept for clarity
+	for _, ca := range pkiApp.CAs {
+		if skipInstallTrust {
+			ca.InstallTrust = &falseBool
+		}
+		pkiApp.CAs[ca.ID] = ca
+	}
+
+	// Add in the CAs configured via directives
+	for _, p := range pairings {
+		for _, sblock := range p.serverBlocks {
+			// find all the CAs that were defined and add them to the app config
+			// i.e. from any "acme_server" directives
+			for _, caCfgValue := range sblock.pile["pki.ca"] {
+				ca := caCfgValue.Value.(*caddypki.CA)
+				if skipInstallTrust {
+					ca.InstallTrust = &falseBool
+				}
+
+				// the CA might already exist from global options, so
+				// don't overwrite it in that case
+				if _, ok := pkiApp.CAs[ca.ID]; !ok {
+					pkiApp.CAs[ca.ID] = ca
+				}
+			}
+		}
+	}
+
+	// if there was no CAs defined in any of the servers,
+	// and we were requested to not install trust, then
+	// add one for the default/local CA to do so
+	if len(pkiApp.CAs) == 0 && skipInstallTrust {
+		ca := new(caddypki.CA)
+		ca.ID = caddypki.DefaultCAID
+		ca.InstallTrust = &falseBool
+		pkiApp.CAs[ca.ID] = ca
+	}
+
+	return pkiApp, warnings, nil
+}
diff --git a/caddyconfig/httpcaddyfile/serveroptions.go b/caddyconfig/httpcaddyfile/serveroptions.go
new file mode 100644
index 00000000000..40a8af20962
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/serveroptions.go
@@ -0,0 +1,344 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ "github.com/dustin/go-humanize"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// serverOptions collects server config overrides parsed from Caddyfile global options
+type serverOptions struct {
+	// If set, will only apply these options to servers that contain a
+	// listener address that matches exactly. If empty, will apply to all
+	// servers that were not already matched by another serverOptions.
+	ListenerAddress string
+
+	// These will all map 1:1 to the caddyhttp.Server struct
+	Name                 string
+	ListenerWrappersRaw  []json.RawMessage
+	ReadTimeout          caddy.Duration
+	ReadHeaderTimeout    caddy.Duration
+	WriteTimeout         caddy.Duration
+	IdleTimeout          caddy.Duration
+	KeepAliveInterval    caddy.Duration
+	MaxHeaderBytes       int
+	EnableFullDuplex     bool
+	Protocols            []string
+	StrictSNIHost        *bool
+	TrustedProxiesRaw    json.RawMessage
+	TrustedProxiesStrict int
+	ClientIPHeaders      []string
+	ShouldLogCredentials bool
+	Metrics              *caddyhttp.Metrics
+	Trace                bool // TODO: EXPERIMENTAL
+}
+
+// unmarshalCaddyfileServerOptions parses one `servers` global option
+// block into a serverOptions value. An optional single argument is a
+// listener address that restricts which servers the options apply to;
+// without it, the options act as a catch-all (see applyServerOptions).
+func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
+	d.Next() // consume option name
+
+	serverOpts := serverOptions{}
+	if d.NextArg() {
+		serverOpts.ListenerAddress = d.Val()
+		if d.NextArg() {
+			return nil, d.ArgErr()
+		}
+	}
+	for d.NextBlock(0) {
+		switch d.Val() {
+		case "name":
+			// a name only makes sense when tied to a specific listener
+			if serverOpts.ListenerAddress == "" {
+				return nil, d.Errf("cannot set a name for a server without a listener address")
+			}
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			serverOpts.Name = d.Val()
+
+		case "listener_wrappers":
+			for nesting := d.Nesting(); d.NextBlock(nesting); {
+				modID := "caddy.listeners." + d.Val()
+				unm, err := caddyfile.UnmarshalModule(d, modID)
+				if err != nil {
+					return nil, err
+				}
+				listenerWrapper, ok := unm.(caddy.ListenerWrapper)
+				if !ok {
+					return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
+				}
+				jsonListenerWrapper := caddyconfig.JSONModuleObject(
+					listenerWrapper,
+					"wrapper",
+					listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
+					nil,
+				)
+				serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
+			}
+
+		case "timeouts":
+			for nesting := d.Nesting(); d.NextBlock(nesting); {
+				switch d.Val() {
+				case "read_body":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					dur, err := caddy.ParseDuration(d.Val())
+					if err != nil {
+						return nil, d.Errf("parsing read_body timeout duration: %v", err)
+					}
+					serverOpts.ReadTimeout = caddy.Duration(dur)
+
+				case "read_header":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					dur, err := caddy.ParseDuration(d.Val())
+					if err != nil {
+						return nil, d.Errf("parsing read_header timeout duration: %v", err)
+					}
+					serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
+
+				case "write":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					dur, err := caddy.ParseDuration(d.Val())
+					if err != nil {
+						return nil, d.Errf("parsing write timeout duration: %v", err)
+					}
+					serverOpts.WriteTimeout = caddy.Duration(dur)
+
+				case "idle":
+					if !d.NextArg() {
+						return nil, d.ArgErr()
+					}
+					dur, err := caddy.ParseDuration(d.Val())
+					if err != nil {
+						return nil, d.Errf("parsing idle timeout duration: %v", err)
+					}
+					serverOpts.IdleTimeout = caddy.Duration(dur)
+
+				default:
+					return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
+				}
+			}
+		case "keepalive_interval":
+			if !d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			dur, err := caddy.ParseDuration(d.Val())
+			if err != nil {
+				return nil, d.Errf("parsing keepalive interval duration: %v", err)
+			}
+			serverOpts.KeepAliveInterval = caddy.Duration(dur)
+
+		case "max_header_size":
+			var sizeStr string
+			if !d.AllArgs(&sizeStr) {
+				return nil, d.ArgErr()
+			}
+			size, err := humanize.ParseBytes(sizeStr)
+			if err != nil {
+				return nil, d.Errf("parsing max_header_size: %v", err)
+			}
+			serverOpts.MaxHeaderBytes = int(size)
+
+		case "enable_full_duplex":
+			if d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			serverOpts.EnableFullDuplex = true
+
+		case "log_credentials":
+			if d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			serverOpts.ShouldLogCredentials = true
+
+		case "protocols":
+			protos := d.RemainingArgs()
+			for _, proto := range protos {
+				if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
+					return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
+				}
+				if slices.Contains(serverOpts.Protocols, proto) {
+					return nil, d.Errf("protocol %s specified more than once", proto)
+				}
+				serverOpts.Protocols = append(serverOpts.Protocols, proto)
+			}
+			// protocols takes no block
+			if nesting := d.Nesting(); d.NextBlock(nesting) {
+				return nil, d.ArgErr()
+			}
+
+		case "strict_sni_host":
+			if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
+				return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
+			}
+			boolVal := true
+			if d.Val() == "insecure_off" {
+				boolVal = false
+			}
+			serverOpts.StrictSNIHost = &boolVal
+
+		case "trusted_proxies":
+			if !d.NextArg() {
+				return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument")
+			}
+			modID := "http.ip_sources." + d.Val()
+			unm, err := caddyfile.UnmarshalModule(d, modID)
+			if err != nil {
+				return nil, err
+			}
+			source, ok := unm.(caddyhttp.IPRangeSource)
+			if !ok {
+				return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm)
+			}
+			jsonSource := caddyconfig.JSONModuleObject(
+				source,
+				"source",
+				source.(caddy.Module).CaddyModule().ID.Name(),
+				nil,
+			)
+			serverOpts.TrustedProxiesRaw = jsonSource
+
+		case "trusted_proxies_strict":
+			if d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			serverOpts.TrustedProxiesStrict = 1
+
+		case "client_ip_headers":
+			headers := d.RemainingArgs()
+			for _, header := range headers {
+				if slices.Contains(serverOpts.ClientIPHeaders, header) {
+					return nil, d.Errf("client IP header %s specified more than once", header)
+				}
+				serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header)
+			}
+			// client_ip_headers takes no block
+			if nesting := d.Nesting(); d.NextBlock(nesting) {
+				return nil, d.ArgErr()
+			}
+
+		case "metrics":
+			caddy.Log().Warn("The nested 'metrics' option inside `servers` is deprecated and will be removed in the next major version. Use the global 'metrics' option instead.")
+			serverOpts.Metrics = new(caddyhttp.Metrics)
+			for nesting := d.Nesting(); d.NextBlock(nesting); {
+				switch d.Val() {
+				case "per_host":
+					serverOpts.Metrics.PerHost = true
+				}
+			}
+
+		case "trace":
+			if d.NextArg() {
+				return nil, d.ArgErr()
+			}
+			serverOpts.Trace = true
+
+		default:
+			return nil, d.Errf("unrecognized servers option '%s'", d.Val())
+		}
+	}
+	return serverOpts, nil
+}
+
+// applyServerOptions sets the server options on the appropriate servers.
+// For each server, the first serverOptions whose ListenerAddress matches
+// one of the server's listen addresses (or is empty, acting as a
+// catch-all) is applied. Returns an error on duplicate server names.
+func applyServerOptions(
+	servers map[string]*caddyhttp.Server,
+	options map[string]any,
+	_ *[]caddyconfig.Warning,
+) error {
+	serverOpts, ok := options["servers"].([]serverOptions)
+	if !ok {
+		return nil
+	}
+
+	// check for duplicate names, which would clobber the config
+	existingNames := map[string]bool{}
+	for _, opts := range serverOpts {
+		if opts.Name == "" {
+			continue
+		}
+		if existingNames[opts.Name] {
+			return fmt.Errorf("cannot use duplicate server name '%s'", opts.Name)
+		}
+		existingNames[opts.Name] = true
+	}
+
+	// collect the server name overrides
+	nameReplacements := map[string]string{}
+
+	for key, server := range servers {
+		// find the first options struct that applies to this server
+		optsIndex := slices.IndexFunc(serverOpts, func(s serverOptions) bool {
+			return s.ListenerAddress == "" || slices.Contains(server.Listen, s.ListenerAddress)
+		})
+
+		// if none apply, then move to the next server
+		if optsIndex == -1 {
+			continue
+		}
+		opts := serverOpts[optsIndex]
+
+		// set all the options
+		server.ListenerWrappersRaw = opts.ListenerWrappersRaw
+		server.ReadTimeout = opts.ReadTimeout
+		server.ReadHeaderTimeout = opts.ReadHeaderTimeout
+		server.WriteTimeout = opts.WriteTimeout
+		server.IdleTimeout = opts.IdleTimeout
+		server.KeepAliveInterval = opts.KeepAliveInterval
+		server.MaxHeaderBytes = opts.MaxHeaderBytes
+		server.EnableFullDuplex = opts.EnableFullDuplex
+		server.Protocols = opts.Protocols
+		server.StrictSNIHost = opts.StrictSNIHost
+		server.TrustedProxiesRaw = opts.TrustedProxiesRaw
+		server.ClientIPHeaders = opts.ClientIPHeaders
+		server.TrustedProxiesStrict = opts.TrustedProxiesStrict
+		server.Metrics = opts.Metrics
+		if opts.ShouldLogCredentials {
+			if server.Logs == nil {
+				server.Logs = new(caddyhttp.ServerLogConfig)
+			}
+			server.Logs.ShouldLogCredentials = opts.ShouldLogCredentials
+		}
+		if opts.Trace {
+			// TODO: THIS IS EXPERIMENTAL (MAY 2024)
+			if server.Logs == nil {
+				server.Logs = new(caddyhttp.ServerLogConfig)
+			}
+			server.Logs.Trace = opts.Trace
+		}
+
+		if opts.Name != "" {
+			nameReplacements[key] = opts.Name
+		}
+	}
+
+	// rename the servers if marked to do so; note the loop variables are
+	// named so as not to shadow the builtin `new`
+	for oldName, newName := range nameReplacements {
+		servers[newName] = servers[oldName]
+		delete(servers, oldName)
+	}
+
+	return nil
+}
diff --git a/caddyconfig/httpcaddyfile/shorthands.go b/caddyconfig/httpcaddyfile/shorthands.go
new file mode 100644
index 00000000000..ca6e4f92c90
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/shorthands.go
@@ -0,0 +1,102 @@
+package httpcaddyfile
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+type ComplexShorthandReplacer struct {
+ search *regexp.Regexp
+ replace string
+}
+
+type ShorthandReplacer struct {
+ complex []ComplexShorthandReplacer
+ simple *strings.Replacer
+}
+
+func NewShorthandReplacer() ShorthandReplacer {
+ // replace shorthand placeholders (which are convenient
+ // when writing a Caddyfile) with their actual placeholder
+ // identifiers or variable names
+ replacer := strings.NewReplacer(placeholderShorthands()...)
+
+ // these are placeholders that allow user-defined final
+ // parameters, but we still want to provide a shorthand
+ // for those, so we use a regexp to replace
+ regexpReplacements := []ComplexShorthandReplacer{
+ {regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
+ {regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
+ {regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
+ {regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
+ {regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
+ {regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
+ {regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"},
+ {regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
+ {regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
+ {regexp.MustCompile(`{resp\.([\w-\.]*)}`), "{http.intercept.$1}"},
+ {regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
+ {regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
+ }
+
+ return ShorthandReplacer{
+ complex: regexpReplacements,
+ simple: replacer,
+ }
+}
+
+// placeholderShorthands returns a slice of old-new string pairs,
+// where the left of the pair is a placeholder shorthand that may
+// be used in the Caddyfile, and the right is the replacement.
+func placeholderShorthands() []string {
+ return []string{
+ "{host}", "{http.request.host}",
+ "{hostport}", "{http.request.hostport}",
+ "{port}", "{http.request.port}",
+ "{orig_method}", "{http.request.orig_method}",
+ "{orig_uri}", "{http.request.orig_uri}",
+ "{orig_path}", "{http.request.orig_uri.path}",
+ "{orig_dir}", "{http.request.orig_uri.path.dir}",
+ "{orig_file}", "{http.request.orig_uri.path.file}",
+ "{orig_query}", "{http.request.orig_uri.query}",
+ "{orig_?query}", "{http.request.orig_uri.prefixed_query}",
+ "{method}", "{http.request.method}",
+ "{uri}", "{http.request.uri}",
+ "{path}", "{http.request.uri.path}",
+ "{dir}", "{http.request.uri.path.dir}",
+ "{file}", "{http.request.uri.path.file}",
+ "{query}", "{http.request.uri.query}",
+ "{?query}", "{http.request.uri.prefixed_query}",
+ "{remote}", "{http.request.remote}",
+ "{remote_host}", "{http.request.remote.host}",
+ "{remote_port}", "{http.request.remote.port}",
+ "{scheme}", "{http.request.scheme}",
+ "{uuid}", "{http.request.uuid}",
+ "{tls_cipher}", "{http.request.tls.cipher_suite}",
+ "{tls_version}", "{http.request.tls.version}",
+ "{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
+ "{tls_client_issuer}", "{http.request.tls.client.issuer}",
+ "{tls_client_serial}", "{http.request.tls.client.serial}",
+ "{tls_client_subject}", "{http.request.tls.client.subject}",
+ "{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
+ "{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
+ "{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
+ "{client_ip}", "{http.vars.client_ip}",
+ }
+}
+
+// ApplyToSegment replaces each shorthand placeholder in the segment with its full placeholder, understandable by Caddy.
+func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) {
+ if segment != nil {
+ for i := 0; i < len(*segment); i++ {
+ // simple string replacements
+ (*segment)[i].Text = s.simple.Replace((*segment)[i].Text)
+ // complex regexp replacements
+ for _, r := range s.complex {
+ (*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace)
+ }
+ }
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic.txt
new file mode 100644
index 00000000000..f1e50e0109f
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic.txt
@@ -0,0 +1,9 @@
+(t2) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8082 {
+ import t2 false
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt
new file mode 100644
index 00000000000..a02fcf90a6f
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt
@@ -0,0 +1,9 @@
+(t1) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8081 {
+ import t1 false
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt b/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt
new file mode 100644
index 00000000000..ab1b32d90b7
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt
@@ -0,0 +1,15 @@
+(t1) {
+ respond 200 {
+ body {args[:]}
+ }
+}
+
+:8081 {
+ import t1 false
+}
+
+import import_variadic.txt
+
+:8083 {
+ import t2 true
+}
\ No newline at end of file
diff --git a/caddyconfig/httpcaddyfile/tlsapp.go b/caddyconfig/httpcaddyfile/tlsapp.go
new file mode 100644
index 00000000000..71b52492660
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/tlsapp.go
@@ -0,0 +1,804 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/mholt/acmez/v3/acme"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+func (st ServerType) buildTLSApp(
+ pairings []sbAddrAssociation,
+ options map[string]any,
+ warnings []caddyconfig.Warning,
+) (*caddytls.TLS, []caddyconfig.Warning, error) {
+ tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
+ var certLoaders []caddytls.CertificateLoader
+
+ httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
+ if hp, ok := options["http_port"].(int); ok {
+ httpPort = strconv.Itoa(hp)
+ }
+ autoHTTPS := []string{}
+ if ah, ok := options["auto_https"].([]string); ok {
+ autoHTTPS = ah
+ }
+
+ // find all hosts that share a server block with a hostless
+ // key, so that they don't get forgotten/omitted by auto-HTTPS
+ // (since they won't appear in route matchers)
+ httpsHostsSharedWithHostlessKey := make(map[string]struct{})
+ if !slices.Contains(autoHTTPS, "off") {
+ for _, pair := range pairings {
+ for _, sb := range pair.serverBlocks {
+ for _, addr := range sb.parsedKeys {
+ if addr.Host != "" {
+ continue
+ }
+
+ // this server block has a hostless key, now
+ // go through and add all the hosts to the set
+ for _, otherAddr := range sb.parsedKeys {
+ if otherAddr.Original == addr.Original {
+ continue
+ }
+ if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort {
+ httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{}
+ }
+ }
+ break
+ }
+ }
+ }
+ }
+
+ // a catch-all automation policy is used as a "default" for all subjects that
+ // don't have custom configuration explicitly associated with them; this
+ // is only added if the global settings or defaults are non-empty
+ catchAllAP, err := newBaseAutomationPolicy(options, warnings, false)
+ if err != nil {
+ return nil, warnings, err
+ }
+ if catchAllAP != nil {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
+ }
+
+ // collect all hosts that have a wildcard in them, and aren't HTTP
+ wildcardHosts := []string{}
+ // hosts that have been explicitly marked to be automated,
+ // even if covered by another wildcard
+ forcedAutomatedNames := make(map[string]struct{})
+ for _, p := range pairings {
+ var addresses []string
+ for _, addressWithProtocols := range p.addressesWithProtocols {
+ addresses = append(addresses, addressWithProtocols.address)
+ }
+ if !listenersUseAnyPortOtherThan(addresses, httpPort) {
+ continue
+ }
+ for _, sblock := range p.serverBlocks {
+ for _, addr := range sblock.parsedKeys {
+ if strings.HasPrefix(addr.Host, "*.") {
+ wildcardHosts = append(wildcardHosts, addr.Host[2:])
+ }
+ }
+ }
+ }
+
+ for _, p := range pairings {
+ // avoid setting up TLS automation policies for a server that is HTTP-only
+ var addresses []string
+ for _, addressWithProtocols := range p.addressesWithProtocols {
+ addresses = append(addresses, addressWithProtocols.address)
+ }
+ if !listenersUseAnyPortOtherThan(addresses, httpPort) {
+ continue
+ }
+
+ for _, sblock := range p.serverBlocks {
+ // check the scheme of all the site addresses,
+ // skip building AP if they all had http://
+ if sblock.isAllHTTP() {
+ continue
+ }
+
+ // get values that populate an automation policy for this block
+ ap, err := newBaseAutomationPolicy(options, warnings, true)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ // make a plain copy so we can compare whether we made any changes
+ apCopy, err := newBaseAutomationPolicy(options, warnings, true)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ sblockHosts := sblock.hostsFromKeys(false)
+ if len(sblockHosts) == 0 && catchAllAP != nil {
+ ap = catchAllAP
+ }
+
+ // on-demand tls
+ if _, ok := sblock.pile["tls.on_demand"]; ok {
+ ap.OnDemand = true
+ }
+
+ // collect hosts that are forced to be automated
+ if _, ok := sblock.pile["tls.force_automate"]; ok {
+ for _, host := range sblockHosts {
+ forcedAutomatedNames[host] = struct{}{}
+ }
+ }
+
+ // reuse private keys tls
+ if _, ok := sblock.pile["tls.reuse_private_keys"]; ok {
+ ap.ReusePrivateKeys = true
+ }
+
+ if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
+ ap.KeyType = keyTypeVals[0].Value.(string)
+ }
+
+ // certificate issuers
+ if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok {
+ var issuers []certmagic.Issuer
+ for _, issuerVal := range issuerVals {
+ issuers = append(issuers, issuerVal.Value.(certmagic.Issuer))
+ }
+ if ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) {
+ // this more correctly implements an error check that was removed
+ // below; try it with this config:
+ //
+ // :443 {
+ // bind 127.0.0.1
+ // }
+ //
+ // :443 {
+ // bind ::1
+ // tls {
+ // issuer acme
+ // }
+ // }
+ return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers)
+ }
+ ap.Issuers = issuers
+ }
+
+ // certificate managers
+ if certManagerVals, ok := sblock.pile["tls.cert_manager"]; ok {
+ for _, certManager := range certManagerVals {
+ certGetterName := certManager.Value.(caddy.Module).CaddyModule().ID.Name()
+ ap.ManagersRaw = append(ap.ManagersRaw, caddyconfig.JSONModuleObject(certManager.Value, "via", certGetterName, &warnings))
+ }
+ }
+ // custom bind host
+ for _, cfgVal := range sblock.pile["bind"] {
+ for _, iss := range ap.Issuers {
+ // if an issuer was already configured and it is NOT an ACME issuer,
+ // skip, since we intend to adjust only ACME issuers; ensure we
+ // include any issuer that embeds/wraps an underlying ACME issuer
+ var acmeIssuer *caddytls.ACMEIssuer
+ if acmeWrapper, ok := iss.(acmeCapable); ok {
+ acmeIssuer = acmeWrapper.GetACMEIssuer()
+ }
+ if acmeIssuer == nil {
+ continue
+ }
+
+ // proceed to configure the ACME issuer's bind host, without
+ // overwriting any existing settings
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.BindHost == "" {
+ // only binding to one host is supported
+ var bindHost string
+ if asserted, ok := cfgVal.Value.(addressesWithProtocols); ok && len(asserted.addresses) > 0 {
+ bindHost = asserted.addresses[0]
+ }
+ acmeIssuer.Challenges.BindHost = bindHost
+ }
+ }
+ }
+
+ // we used to ensure this block is allowed to create an automation policy;
+ // doing so was forbidden if it has a key with no host (i.e. ":443")
+ // and if there is a different server block that also has a key with no
+ // host -- since a key with no host matches any host, we need its
+ // associated automation policy to have an empty Subjects list, i.e. no
+ // host filter, which is indistinguishable between the two server blocks
+ // because automation is not done in the context of a particular server...
+ // this is an example of a poor mapping from Caddyfile to JSON but that's
+ // the least-leaky abstraction I could figure out -- however, this check
+ // was preventing certain listeners, like those provided by plugins, from
+ // being used as desired (see the Tailscale listener plugin), so I removed
+ // the check: and I think since I originally wrote the check I added a new
+ // check above which *properly* detects this ambiguity without breaking the
+ // listener plugin; see the check above with a commented example config
+ if len(sblockHosts) == 0 && catchAllAP == nil {
+ // this server block has a key with no hosts, but there is not yet
+ // a catch-all automation policy (probably because no global options
+ // were set), so this one becomes it
+ catchAllAP = ap
+ }
+
+ hostsNotHTTP := sblock.hostsFromKeysNotHTTP(httpPort)
+ sort.Strings(hostsNotHTTP) // solely for deterministic test results
+
+ // if we prefer wildcards and the AP is unchanged,
+ // then we can skip this AP because it should be covered
+ // by an AP with a wildcard
+ if slices.Contains(autoHTTPS, "prefer_wildcard") {
+ if hostsCoveredByWildcard(hostsNotHTTP, wildcardHosts) &&
+ reflect.DeepEqual(ap, apCopy) {
+ continue
+ }
+ }
+
+ // associate our new automation policy with this server block's hosts
+ ap.SubjectsRaw = hostsNotHTTP
+
+ // if a combination of public and internal names were given
+ // for this same server block and no issuer was specified, we
+ // need to separate them out in the automation policies so
+ // that the internal names can use the internal issuer and
+ // the other names can use the default/public/ACME issuer
+ var ap2 *caddytls.AutomationPolicy
+ if len(ap.Issuers) == 0 {
+ var internal, external []string
+ for _, s := range ap.SubjectsRaw {
+ // do not create Issuers for Tailscale domains; they will be given a Manager instead
+ if isTailscaleDomain(s) {
+ continue
+ }
+ if !certmagic.SubjectQualifiesForCert(s) {
+ return nil, warnings, fmt.Errorf("subject does not qualify for certificate: '%s'", s)
+ }
+ // we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance:
+ // names like *.*.tld that may not qualify for a public certificate are actually
+ // fine when used with OnDemand, since OnDemand (currently) does not obtain
+ // wildcards (if it ever does, there will be a separate config option to enable
+ // it that we would need to check here) since the hostname is known at handshake;
+ // and it is unexpected to switch to internal issuer when the user wants to get
+ // regular certificates on-demand for a class of certs like *.*.tld.
+ if subjectQualifiesForPublicCert(ap, s) {
+ external = append(external, s)
+ } else {
+ internal = append(internal, s)
+ }
+ }
+ if len(external) > 0 && len(internal) > 0 {
+ ap.SubjectsRaw = external
+ apCopy := *ap
+ ap2 = &apCopy
+ ap2.SubjectsRaw = internal
+ ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)}
+ }
+ }
+
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap)
+ if ap2 != nil {
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2)
+ }
+
+ // certificate loaders
+ if clVals, ok := sblock.pile["tls.cert_loader"]; ok {
+ for _, clVal := range clVals {
+ certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
+ }
+ }
+ }
+ }
+
+ // group certificate loaders by module name, then add to config
+ if len(certLoaders) > 0 {
+ loadersByName := make(map[string]caddytls.CertificateLoader)
+ for _, cl := range certLoaders {
+ name := caddy.GetModuleName(cl)
+ // ugh... technically, we may have multiple FileLoader and FolderLoader
+ // modules (because the tls directive returns one per occurrence), but
+ // the config structure expects only one instance of each kind of loader
+ // module, so we have to combine them... instead of enumerating each
+ // possible cert loader module in a type switch, we can use reflection,
+ // which works on any cert loaders that are slice types
+ if reflect.TypeOf(cl).Kind() == reflect.Slice {
+ combined := reflect.ValueOf(loadersByName[name])
+ if !combined.IsValid() {
+ combined = reflect.New(reflect.TypeOf(cl)).Elem()
+ }
+ clVal := reflect.ValueOf(cl)
+ for i := 0; i < clVal.Len(); i++ {
+ combined = reflect.Append(combined, clVal.Index(i))
+ }
+ loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
+ }
+ }
+ for certLoaderName, loaders := range loadersByName {
+ tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings)
+ }
+ }
+
+ // set any of the on-demand options, for if/when on-demand TLS is enabled
+ if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.OnDemand = onDemand
+ }
+
+ // if the storage check is explicitly set to "off", disable it
+ if sc, ok := options["storage_check"].(string); ok && sc == "off" {
+ tlsApp.DisableStorageCheck = true
+ }
+
+ // if the storage clean interval is explicitly set to false, disable cleaning
+ if sci, ok := options["storage_clean_interval"].(bool); ok && !sci {
+ tlsApp.DisableStorageClean = true
+ }
+
+ // set the storage clean interval if configured
+ if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.StorageCleanInterval = storageCleanInterval
+ }
+
+ // set the expired certificates renew interval if configured
+ if renewCheckInterval, ok := options["renew_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.RenewCheckInterval = renewCheckInterval
+ }
+
+ // set the OCSP check interval if configured
+ if ocspCheckInterval, ok := options["ocsp_interval"].(caddy.Duration); ok {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.OCSPCheckInterval = ocspCheckInterval
+ }
+
+ // set whether OCSP stapling should be disabled for manually-managed certificates
+ if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok {
+ tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling
+ }
+
+ // if any hostnames appear on the same server block as a key with
+ // no host, they will not be used with route matchers because the
+ // hostless key matches all hosts, therefore, it wouldn't be
+ // considered for auto-HTTPS, so we need to make sure those hosts
+ // are manually considered for managed certificates; we also need
+ // to make sure that any of these names which are internal-only
+ // get internal certificates by default rather than ACME
+ var al caddytls.AutomateLoader
+ internalAP := &caddytls.AutomationPolicy{
+ IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
+ }
+ if !slices.Contains(autoHTTPS, "off") && !slices.Contains(autoHTTPS, "disable_certs") {
+ for h := range httpsHostsSharedWithHostlessKey {
+ al = append(al, h)
+ if !certmagic.SubjectQualifiesForPublicCert(h) {
+ internalAP.SubjectsRaw = append(internalAP.SubjectsRaw, h)
+ }
+ }
+ }
+ for name := range forcedAutomatedNames {
+ if slices.Contains(al, name) {
+ continue
+ }
+ al = append(al, name)
+ }
+ slices.Sort(al) // to stabilize the adapt output
+ if len(al) > 0 {
+ tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings)
+ }
+ if len(internalAP.SubjectsRaw) > 0 {
+ if tlsApp.Automation == nil {
+ tlsApp.Automation = new(caddytls.AutomationConfig)
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP)
+ }
+
+ // if there are any global options set for issuers (ACME ones in particular), make sure they
+ // take effect in every automation policy that does not have any issuers
+ if tlsApp.Automation != nil {
+ globalEmail := options["email"]
+ globalACMECA := options["acme_ca"]
+ globalACMECARoot := options["acme_ca_root"]
+ globalACMEDNS := options["acme_dns"]
+ globalACMEEAB := options["acme_eab"]
+ globalPreferredChains := options["preferred_chains"]
+ hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil
+ if hasGlobalACMEDefaults {
+ for i := 0; i < len(tlsApp.Automation.Policies); i++ {
+ ap := tlsApp.Automation.Policies[i]
+ if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) {
+ // for public names, create default issuers which will later be filled in with configured global defaults
+ // (internal names will implicitly use the internal issuer at auto-https time)
+ emailStr, _ := globalEmail.(string)
+ ap.Issuers = caddytls.DefaultIssuers(emailStr)
+
+ // if a specific endpoint is configured, can't use multiple default issuers
+ if globalACMECA != nil {
+ ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)}
+ }
+ }
+ }
+ }
+ }
+
+ // finalize and verify policies; do cleanup
+ if tlsApp.Automation != nil {
+ for i, ap := range tlsApp.Automation.Policies {
+ // ensure all issuers have global defaults filled in
+ for j, issuer := range ap.Issuers {
+ err := fillInGlobalACMEDefaults(issuer, options)
+ if err != nil {
+ return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err)
+ }
+ }
+
+ // encode all issuer values we created, so they will be rendered in the output
+ if len(ap.Issuers) > 0 && ap.IssuersRaw == nil {
+ for _, iss := range ap.Issuers {
+ issuerName := iss.(caddy.Module).CaddyModule().ID.Name()
+ ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings))
+ }
+ }
+ }
+
+ // consolidate automation policies that are the exact same
+ tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
+
+ // ensure automation policies don't overlap subjects (this should be
+ // an error at provision-time as well, but catch it in the adapt phase
+ // for convenience)
+ automationHostSet := make(map[string]struct{})
+ for _, ap := range tlsApp.Automation.Policies {
+ for _, s := range ap.SubjectsRaw {
+ if _, ok := automationHostSet[s]; ok {
+ return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s)
+ }
+ automationHostSet[s] = struct{}{}
+ }
+ }
+
+ // if nothing remains, remove any excess values to clean up the resulting config
+ if len(tlsApp.Automation.Policies) == 0 {
+ tlsApp.Automation.Policies = nil
+ }
+ if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) {
+ tlsApp.Automation = nil
+ }
+ }
+
+ return tlsApp, warnings, nil
+}
+
+type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
+
+func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) error {
+ acmeWrapper, ok := issuer.(acmeCapable)
+ if !ok {
+ return nil
+ }
+ acmeIssuer := acmeWrapper.GetACMEIssuer()
+ if acmeIssuer == nil {
+ return nil
+ }
+
+ globalEmail := options["email"]
+ globalACMECA := options["acme_ca"]
+ globalACMECARoot := options["acme_ca_root"]
+ globalACMEDNS := options["acme_dns"]
+ globalACMEEAB := options["acme_eab"]
+ globalPreferredChains := options["preferred_chains"]
+ globalCertLifetime := options["cert_lifetime"]
+ globalHTTPPort, globalHTTPSPort := options["http_port"], options["https_port"]
+
+ if globalEmail != nil && acmeIssuer.Email == "" {
+ acmeIssuer.Email = globalEmail.(string)
+ }
+ if globalACMECA != nil && acmeIssuer.CA == "" {
+ acmeIssuer.CA = globalACMECA.(string)
+ }
+ if globalACMECARoot != nil && !slices.Contains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) {
+ acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string))
+ }
+ if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) {
+ acmeIssuer.Challenges = &caddytls.ChallengesConfig{
+ DNS: &caddytls.DNSChallengeConfig{
+ ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil),
+ },
+ }
+ }
+ if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil {
+ acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB)
+ }
+ if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil {
+ acmeIssuer.PreferredChains = globalPreferredChains.(*caddytls.ChainPreference)
+ }
+ if globalHTTPPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.HTTP == nil || acmeIssuer.Challenges.HTTP.AlternatePort == 0) {
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.HTTP == nil {
+ acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
+ }
+ acmeIssuer.Challenges.HTTP.AlternatePort = globalHTTPPort.(int)
+ }
+ if globalHTTPSPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.TLSALPN == nil || acmeIssuer.Challenges.TLSALPN.AlternatePort == 0) {
+ if acmeIssuer.Challenges == nil {
+ acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
+ }
+ if acmeIssuer.Challenges.TLSALPN == nil {
+ acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
+ }
+ acmeIssuer.Challenges.TLSALPN.AlternatePort = globalHTTPSPort.(int)
+ }
+ if globalCertLifetime != nil && acmeIssuer.CertificateLifetime == 0 {
+ acmeIssuer.CertificateLifetime = globalCertLifetime.(caddy.Duration)
+ }
+ return nil
+}
+
+// newBaseAutomationPolicy returns a new TLS automation policy that gets
+// its values from the global options map. It should be used as the base
+// for any other automation policies. A nil policy (and no error) will be
+// returned if there are no default/global options. However, if always is
+// true, a non-nil value will always be returned (unless there is an error).
+func newBaseAutomationPolicy(
+ options map[string]any,
+ _ []caddyconfig.Warning,
+ always bool,
+) (*caddytls.AutomationPolicy, error) {
+ issuers, hasIssuers := options["cert_issuer"]
+ _, hasLocalCerts := options["local_certs"]
+ keyType, hasKeyType := options["key_type"]
+ ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
+
+ hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling
+
+ // if there are no global options related to automation policies
+ // set, then we can just return right away
+ if !hasGlobalAutomationOpts {
+ if always {
+ return new(caddytls.AutomationPolicy), nil
+ }
+ return nil, nil
+ }
+
+ ap := new(caddytls.AutomationPolicy)
+ if hasKeyType {
+ ap.KeyType = keyType.(string)
+ }
+
+ if hasIssuers && hasLocalCerts {
+ return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer")
+ }
+
+ if hasIssuers {
+ ap.Issuers = issuers.([]certmagic.Issuer)
+ } else if hasLocalCerts {
+ ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
+ }
+
+ if hasOCSPStapling {
+ ocspConfig := ocspStapling.(certmagic.OCSPConfig)
+ ap.DisableOCSPStapling = ocspConfig.DisableStapling
+ ap.OCSPOverrides = ocspConfig.ResponderOverrides
+ }
+
+ return ap, nil
+}
+
+// consolidateAutomationPolicies combines automation policies that are the same,
+// for a cleaner overall output.
+func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
+ // sort from most specific to least specific; we depend on this ordering
+ sort.SliceStable(aps, func(i, j int) bool {
+ if automationPolicyIsSubset(aps[i], aps[j]) {
+ return true
+ }
+ if automationPolicyIsSubset(aps[j], aps[i]) {
+ return false
+ }
+ return len(aps[i].SubjectsRaw) > len(aps[j].SubjectsRaw)
+ })
+
+ emptyAPCount := 0
+ origLenAPs := len(aps)
+ // compute the number of empty policies (disregarding subjects) - see #4128
+ emptyAP := new(caddytls.AutomationPolicy)
+ for i := 0; i < len(aps); i++ {
+ emptyAP.SubjectsRaw = aps[i].SubjectsRaw
+ if reflect.DeepEqual(aps[i], emptyAP) {
+ emptyAPCount++
+ if !automationPolicyHasAllPublicNames(aps[i]) {
+ // if this automation policy has internal names, we might as well remove it
+ // so auto-https can implicitly use the internal issuer
+ aps = slices.Delete(aps, i, i+1)
+ i--
+ }
+ }
+ }
+ // If all policies are empty, we can return nil, as there is no need to set any policy
+ if emptyAPCount == origLenAPs {
+ return nil
+ }
+
+ // remove or combine duplicate policies
+outer:
+ for i := 0; i < len(aps); i++ {
+ // compare only with next policies; we sorted by specificity so we must not delete earlier policies
+ for j := i + 1; j < len(aps); j++ {
+ // if they're exactly equal in every way, just keep one of them
+ if reflect.DeepEqual(aps[i], aps[j]) {
+ aps = slices.Delete(aps, j, j+1)
+ // must re-evaluate current i against next j; can't skip it!
+ // even if i decrements to -1, will be incremented to 0 immediately
+ i--
+ continue outer
+ }
+
+ // if the policy is the same, we can keep just one, but we have
+ // to be careful which one we keep; if only one has any hostnames
+ // defined, then we need to keep the one without any hostnames,
+ // otherwise the one without any subjects (a catch-all) would be
+ // eaten up by the one with subjects; and if both have subjects, we
+ // need to combine their lists
+ if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
+ reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
+ bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
+ aps[i].MustStaple == aps[j].MustStaple &&
+ aps[i].KeyType == aps[j].KeyType &&
+ aps[i].OnDemand == aps[j].OnDemand &&
+ aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
+ aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
+ if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
+ // later policy (at j) has no subjects ("catch-all"), so we can
+ // remove the identical-but-more-specific policy that comes first
+ // AS LONG AS it is not shadowed by another policy before it; e.g.
+ // if policy i is for example.com, policy i+1 is '*.com', and policy
+ // j is catch-all, we cannot remove policy i because that would
+ // cause example.com to be served by the less specific policy for
+ // '*.com', which might be different (yes we've seen this happen)
+ if automationPolicyShadows(i, aps) >= j {
+ aps = slices.Delete(aps, i, i+1)
+ i--
+ continue outer
+ }
+ } else {
+ // avoid repeated subjects
+ for _, subj := range aps[j].SubjectsRaw {
+ if !slices.Contains(aps[i].SubjectsRaw, subj) {
+ aps[i].SubjectsRaw = append(aps[i].SubjectsRaw, subj)
+ }
+ }
+ aps = slices.Delete(aps, j, j+1)
+ j--
+ }
+ }
+ }
+ }
+
+ return aps
+}
+
+// automationPolicyIsSubset returns true if a's subjects are a subset
+// of b's subjects.
+func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool {
+ if len(b.SubjectsRaw) == 0 {
+ return true
+ }
+ if len(a.SubjectsRaw) == 0 {
+ return false
+ }
+ for _, aSubj := range a.SubjectsRaw {
+ inSuperset := slices.ContainsFunc(b.SubjectsRaw, func(bSubj string) bool {
+ return certmagic.MatchWildcard(aSubj, bSubj)
+ })
+ if !inSuperset {
+ return false
+ }
+ }
+ return true
+}
+
+// automationPolicyShadows returns the index of a policy that aps[i] shadows;
+// in other words, for all policies after position i, if that policy covers
+// the same subjects but is less specific, that policy's position is returned,
+// or -1 if no shadowing is found. For example, if policy i is for
+// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be
+// returned, since that policy is shadowed by i, which is in front.
+func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int {
+ for j := i + 1; j < len(aps); j++ {
+ if automationPolicyIsSubset(aps[i], aps[j]) {
+ return j
+ }
+ }
+ return -1
+}
+
+// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except
+// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify
+// if the automation policy has OnDemand enabled (i.e. this function is more lenient).
+//
+// IP subjects are considered as non-qualifying for public certs. Technically, there are
+// now public ACME CAs as well as non-ACME CAs that issue IP certificates. But this function
+// is used solely for implicit automation (defaults), where it gets really complicated to
+// keep track of which issuers support IP certificates in which circumstances. Currently,
+// issuers that support IP certificates are very few, and all require some sort of config
+// from the user anyway (such as an account credential). Since we cannot implicitly and
+// automatically get public IP certs without configuration from the user, we treat IPs as
+// not qualifying for public certificates. Users should expressly configure an issuer
+// that supports IP certs for that purpose.
+func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool {
+ return !certmagic.SubjectIsIP(subj) &&
+ !certmagic.SubjectIsInternal(subj) &&
+ (strings.Count(subj, "*.") < 2 || ap.OnDemand)
+}
+
+// automationPolicyHasAllPublicNames returns true if all the names on the policy
+// qualify for public certs and none of them are Tailscale domains.
+func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool {
+ return !slices.ContainsFunc(ap.SubjectsRaw, func(i string) bool {
+ return !subjectQualifiesForPublicCert(ap, i) || isTailscaleDomain(i)
+ })
+}
+
+func isTailscaleDomain(name string) bool {
+ return strings.HasSuffix(strings.ToLower(name), ".ts.net")
+}
+
+func hostsCoveredByWildcard(hosts []string, wildcards []string) bool {
+ if len(hosts) == 0 || len(wildcards) == 0 {
+ return false
+ }
+ for _, host := range hosts {
+ for _, wildcard := range wildcards {
+ if strings.HasPrefix(host, "*.") {
+ continue
+ }
+ if certmagic.MatchWildcard(host, "*."+wildcard) {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/caddyconfig/httpcaddyfile/tlsapp_test.go b/caddyconfig/httpcaddyfile/tlsapp_test.go
new file mode 100644
index 00000000000..d8edbdf9b19
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/tlsapp_test.go
@@ -0,0 +1,56 @@
+package httpcaddyfile
+
+import (
+ "testing"
+
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+func TestAutomationPolicyIsSubset(t *testing.T) {
+ for i, test := range []struct {
+ a, b []string
+ expect bool
+ }{
+ {
+ a: []string{"example.com"},
+ b: []string{},
+ expect: true,
+ },
+ {
+ a: []string{},
+ b: []string{"example.com"},
+ expect: false,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"*.example.com"},
+ expect: true,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"foo.example.com"},
+ expect: true,
+ },
+ {
+ a: []string{"foo.example.com"},
+ b: []string{"example.com"},
+ expect: false,
+ },
+ {
+ a: []string{"example.com", "foo.example.com"},
+ b: []string{"*.com", "*.*.com"},
+ expect: true,
+ },
+ {
+ a: []string{"example.com", "foo.example.com"},
+ b: []string{"*.com"},
+ expect: false,
+ },
+ } {
+ apA := &caddytls.AutomationPolicy{SubjectsRaw: test.a}
+ apB := &caddytls.AutomationPolicy{SubjectsRaw: test.b}
+ if actual := automationPolicyIsSubset(apA, apB); actual != test.expect {
+ t.Errorf("Test %d: Expected %t but got %t (A: %v B: %v)", i, test.expect, actual, test.a, test.b)
+ }
+ }
+}
diff --git a/caddyconfig/httploader.go b/caddyconfig/httploader.go
new file mode 100644
index 00000000000..a25041a3435
--- /dev/null
+++ b/caddyconfig/httploader.go
@@ -0,0 +1,218 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func init() {
+ caddy.RegisterModule(HTTPLoader{})
+}
+
+// HTTPLoader can load Caddy configs over HTTP(S).
+//
+// If the response is not a JSON config, a config adapter must be specified
+// either in the loader config (`adapter`), or in the Content-Type HTTP header
+// returned in the HTTP response from the server. The Content-Type header is
+// read just like the admin API's `/load` endpoint. If you don't have control
+// over the HTTP server (but can still trust its response), you can override
+// the Content-Type header by setting the `adapter` property in this config.
+type HTTPLoader struct {
+ // The method for the request. Default: GET
+ Method string `json:"method,omitempty"`
+
+ // The URL of the request.
+ URL string `json:"url,omitempty"`
+
+ // HTTP headers to add to the request.
+ Headers http.Header `json:"header,omitempty"`
+
+ // Maximum time allowed for a complete connection and request.
+ Timeout caddy.Duration `json:"timeout,omitempty"`
+
+ // The name of the config adapter to use, if any. Only needed
+ // if the HTTP response is not a JSON config and if the server's
+ // Content-Type header is missing or incorrect.
+ Adapter string `json:"adapter,omitempty"`
+
+ TLS *struct {
+ // Present this instance's managed remote identity credentials to the server.
+ UseServerIdentity bool `json:"use_server_identity,omitempty"`
+
+ // PEM-encoded client certificate filename to present to the server.
+ ClientCertificateFile string `json:"client_certificate_file,omitempty"`
+
+ // PEM-encoded key to use with the client certificate.
+ ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`
+
+ // List of PEM-encoded CA certificate files to add to the trust
+ // store used when verifying the server's certificate.
+ RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
+ } `json:"tls,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (HTTPLoader) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "caddy.config_loaders.http",
+ New: func() caddy.Module { return new(HTTPLoader) },
+ }
+}
+
+// LoadConfig loads a Caddy config.
+func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) {
+ repl := caddy.NewReplacer()
+
+ client, err := hl.makeClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ method := repl.ReplaceAll(hl.Method, "")
+ if method == "" {
+ method = http.MethodGet
+ }
+
+ url := repl.ReplaceAll(hl.URL, "")
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ for key, vals := range hl.Headers {
+ for _, val := range vals {
+ req.Header.Add(repl.ReplaceAll(key, ""), repl.ReplaceKnown(val, ""))
+ }
+ }
+
+ resp, err := doHttpCallWithRetries(ctx, client, req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // adapt the config based on either manually-configured adapter or server's response header
+ ct := resp.Header.Get("Content-Type")
+ if hl.Adapter != "" {
+ ct = "text/" + hl.Adapter
+ }
+ result, warnings, err := adaptByContentType(ct, body)
+ if err != nil {
+ return nil, err
+ }
+ for _, warn := range warnings {
+ ctx.Logger().Warn(warn.String())
+ }
+
+ return result, nil
+}
+
+func attemptHttpCall(client *http.Client, request *http.Request) (*http.Response, error) {
+ resp, err := client.Do(request)
+ if err != nil {
+ return nil, fmt.Errorf("problem calling http loader url: %v", err)
+ } else if resp.StatusCode < 200 || resp.StatusCode > 499 {
+ resp.Body.Close()
+ return nil, fmt.Errorf("bad response status code from http loader url: %v", resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func doHttpCallWithRetries(ctx caddy.Context, client *http.Client, request *http.Request) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ const maxAttempts = 10
+
+ for i := 0; i < maxAttempts; i++ {
+ resp, err = attemptHttpCall(client, request)
+ if err != nil && i < maxAttempts-1 {
+ select {
+ case <-time.After(time.Millisecond * 500):
+ case <-ctx.Done():
+ return resp, ctx.Err()
+ }
+ } else {
+ break
+ }
+ }
+
+ return resp, err
+}
+
+func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) {
+ client := &http.Client{
+ Timeout: time.Duration(hl.Timeout),
+ }
+
+ if hl.TLS != nil {
+ var tlsConfig *tls.Config
+
+ // client authentication
+ if hl.TLS.UseServerIdentity {
+ certs, err := ctx.IdentityCredentials(ctx.Logger())
+ if err != nil {
+ return nil, fmt.Errorf("getting server identity credentials: %v", err)
+ }
+ // See https://github.com/securego/gosec/issues/1054#issuecomment-2072235199
+ //nolint:gosec
+ tlsConfig = &tls.Config{Certificates: certs}
+ } else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" {
+ cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ //nolint:gosec
+ tlsConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
+ }
+
+ // trusted server certs
+ if len(hl.TLS.RootCAPEMFiles) > 0 {
+ rootPool := x509.NewCertPool()
+ for _, pemFile := range hl.TLS.RootCAPEMFiles {
+ pemData, err := os.ReadFile(pemFile)
+ if err != nil {
+ return nil, fmt.Errorf("failed reading ca cert: %v", err)
+ }
+ rootPool.AppendCertsFromPEM(pemData)
+ }
+ if tlsConfig == nil {
+ tlsConfig = new(tls.Config)
+ }
+ tlsConfig.RootCAs = rootPool
+ }
+
+ client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
+ }
+
+ return client, nil
+}
+
+var _ caddy.ConfigLoader = (*HTTPLoader)(nil)
diff --git a/caddyconfig/load.go b/caddyconfig/load.go
new file mode 100644
index 00000000000..9f5cda9050f
--- /dev/null
+++ b/caddyconfig/load.go
@@ -0,0 +1,214 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyconfig
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func init() {
+ caddy.RegisterModule(adminLoad{})
+}
+
+// adminLoad is a module that provides the /load endpoint
+// for the Caddy admin API. The only reason it's not baked
+// into the caddy package directly is because of the import
+// of the caddyconfig package for its GetAdapter function.
+// If the caddy package depends on the caddyconfig package,
+// then the caddyconfig package will not be able to import
+// the caddy package, and it can more easily cause backward
+// edges in the dependency tree (i.e. import cycle).
+// Fortunately, the admin API has first-class support for
+// adding endpoints from modules.
+type adminLoad struct{}
+
+// CaddyModule returns the Caddy module information.
+func (adminLoad) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "admin.api.load",
+ New: func() caddy.Module { return new(adminLoad) },
+ }
+}
+
+// Routes returns a route for the /load endpoint.
+func (al adminLoad) Routes() []caddy.AdminRoute {
+ return []caddy.AdminRoute{
+ {
+ Pattern: "/load",
+ Handler: caddy.AdminHandlerFunc(al.handleLoad),
+ },
+ {
+ Pattern: "/adapt",
+ Handler: caddy.AdminHandlerFunc(al.handleAdapt),
+ },
+ }
+}
+
+// handleLoad replaces the entire current configuration with
+// a new one provided in the response body. It supports config
+// adapters through the use of the Content-Type header. A
+// config that is identical to the currently-running config
+// will be a no-op unless Cache-Control: must-revalidate is set.
+func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
+ if r.Method != http.MethodPost {
+ return caddy.APIError{
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method not allowed"),
+ }
+ }
+
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ _, err := io.Copy(buf, r.Body)
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("reading request body: %v", err),
+ }
+ }
+ body := buf.Bytes()
+
+ // if the config is formatted other than Caddy's native
+ // JSON, we need to adapt it before loading it
+ if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" {
+ result, warnings, err := adaptByContentType(ctHeader, body)
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: err,
+ }
+ }
+ if len(warnings) > 0 {
+ respBody, err := json.Marshal(warnings)
+ if err != nil {
+ caddy.Log().Named("admin.api.load").Error(err.Error())
+ }
+ _, _ = w.Write(respBody)
+ }
+ body = result
+ }
+
+ forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
+
+ err = caddy.Load(body, forceReload)
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("loading config: %v", err),
+ }
+ }
+
+ caddy.Log().Named("admin.api").Info("load complete")
+
+ return nil
+}
+
+// handleAdapt adapts the given Caddy config to JSON and responds with the result.
+func (adminLoad) handleAdapt(w http.ResponseWriter, r *http.Request) error {
+ if r.Method != http.MethodPost {
+ return caddy.APIError{
+ HTTPStatus: http.StatusMethodNotAllowed,
+ Err: fmt.Errorf("method not allowed"),
+ }
+ }
+
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ _, err := io.Copy(buf, r.Body)
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("reading request body: %v", err),
+ }
+ }
+
+ result, warnings, err := adaptByContentType(r.Header.Get("Content-Type"), buf.Bytes())
+ if err != nil {
+ return caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: err,
+ }
+ }
+
+ out := struct {
+ Warnings []Warning `json:"warnings,omitempty"`
+ Result json.RawMessage `json:"result"`
+ }{
+ Warnings: warnings,
+ Result: result,
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ return json.NewEncoder(w).Encode(out)
+}
+
+// adaptByContentType adapts body to Caddy JSON using the adapter specified by contentType.
+// If contentType is empty or ends with "/json", the input will be returned, as a no-op.
+func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) {
+ // assume JSON as the default
+ if contentType == "" {
+ return body, nil, nil
+ }
+
+ ct, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return nil, nil, caddy.APIError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: fmt.Errorf("invalid Content-Type: %v", err),
+ }
+ }
+
+ // if already JSON, no need to adapt
+ if strings.HasSuffix(ct, "/json") {
+ return body, nil, nil
+ }
+
+ // adapter name should be suffix of MIME type
+ _, adapterName, slashFound := strings.Cut(ct, "/")
+ if !slashFound {
+ return nil, nil, fmt.Errorf("malformed Content-Type")
+ }
+
+ cfgAdapter := GetAdapter(adapterName)
+ if cfgAdapter == nil {
+ return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
+ }
+
+ result, warnings, err := cfgAdapter.Adapt(body, nil)
+ if err != nil {
+ return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err)
+ }
+
+ return result, warnings, nil
+}
+
+var bufPool = sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+}
diff --git a/caddyfile/dispenser.go b/caddyfile/dispenser.go
deleted file mode 100644
index edb7bfafa39..00000000000
--- a/caddyfile/dispenser.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package caddyfile
-
-import (
- "errors"
- "fmt"
- "io"
- "strings"
-)
-
-// Dispenser is a type that dispenses tokens, similarly to a lexer,
-// except that it can do so with some notion of structure and has
-// some really convenient methods.
-type Dispenser struct {
- filename string
- tokens []Token
- cursor int
- nesting int
-}
-
-// NewDispenser returns a Dispenser, ready to use for parsing the given input.
-func NewDispenser(filename string, input io.Reader) Dispenser {
- tokens, _ := allTokens(input) // ignoring error because nothing to do with it
- return Dispenser{
- filename: filename,
- tokens: tokens,
- cursor: -1,
- }
-}
-
-// NewDispenserTokens returns a Dispenser filled with the given tokens.
-func NewDispenserTokens(filename string, tokens []Token) Dispenser {
- return Dispenser{
- filename: filename,
- tokens: tokens,
- cursor: -1,
- }
-}
-
-// Next loads the next token. Returns true if a token
-// was loaded; false otherwise. If false, all tokens
-// have been consumed.
-func (d *Dispenser) Next() bool {
- if d.cursor < len(d.tokens)-1 {
- d.cursor++
- return true
- }
- return false
-}
-
-// NextArg loads the next token if it is on the same
-// line. Returns true if a token was loaded; false
-// otherwise. If false, all tokens on the line have
-// been consumed. It handles imported tokens correctly.
-func (d *Dispenser) NextArg() bool {
- if d.cursor < 0 {
- d.cursor++
- return true
- }
- if d.cursor >= len(d.tokens) {
- return false
- }
- if d.cursor < len(d.tokens)-1 &&
- d.tokens[d.cursor].File == d.tokens[d.cursor+1].File &&
- d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line {
- d.cursor++
- return true
- }
- return false
-}
-
-// NextLine loads the next token only if it is not on the same
-// line as the current token, and returns true if a token was
-// loaded; false otherwise. If false, there is not another token
-// or it is on the same line. It handles imported tokens correctly.
-func (d *Dispenser) NextLine() bool {
- if d.cursor < 0 {
- d.cursor++
- return true
- }
- if d.cursor >= len(d.tokens) {
- return false
- }
- if d.cursor < len(d.tokens)-1 &&
- (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File ||
- d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) {
- d.cursor++
- return true
- }
- return false
-}
-
-// NextBlock can be used as the condition of a for loop
-// to load the next token as long as it opens a block or
-// is already in a block. It returns true if a token was
-// loaded, or false when the block's closing curly brace
-// was loaded and thus the block ended. Nested blocks are
-// not supported.
-func (d *Dispenser) NextBlock() bool {
- if d.nesting > 0 {
- d.Next()
- if d.Val() == "}" {
- d.nesting--
- return false
- }
- return true
- }
- if !d.NextArg() { // block must open on same line
- return false
- }
- if d.Val() != "{" {
- d.cursor-- // roll back if not opening brace
- return false
- }
- d.Next()
- if d.Val() == "}" {
- // Open and then closed right away
- return false
- }
- d.nesting++
- return true
-}
-
-// Val gets the text of the current token. If there is no token
-// loaded, it returns empty string.
-func (d *Dispenser) Val() string {
- if d.cursor < 0 || d.cursor >= len(d.tokens) {
- return ""
- }
- return d.tokens[d.cursor].Text
-}
-
-// Line gets the line number of the current token. If there is no token
-// loaded, it returns 0.
-func (d *Dispenser) Line() int {
- if d.cursor < 0 || d.cursor >= len(d.tokens) {
- return 0
- }
- return d.tokens[d.cursor].Line
-}
-
-// File gets the filename of the current token. If there is no token loaded,
-// it returns the filename originally given when parsing started.
-func (d *Dispenser) File() string {
- if d.cursor < 0 || d.cursor >= len(d.tokens) {
- return d.filename
- }
- if tokenFilename := d.tokens[d.cursor].File; tokenFilename != "" {
- return tokenFilename
- }
- return d.filename
-}
-
-// Args is a convenience function that loads the next arguments
-// (tokens on the same line) into an arbitrary number of strings
-// pointed to in targets. If there are fewer tokens available
-// than string pointers, the remaining strings will not be changed
-// and false will be returned. If there were enough tokens available
-// to fill the arguments, then true will be returned.
-func (d *Dispenser) Args(targets ...*string) bool {
- enough := true
- for i := 0; i < len(targets); i++ {
- if !d.NextArg() {
- enough = false
- break
- }
- *targets[i] = d.Val()
- }
- return enough
-}
-
-// RemainingArgs loads any more arguments (tokens on the same line)
-// into a slice and returns them. Open curly brace tokens also indicate
-// the end of arguments, and the curly brace is not included in
-// the return value nor is it loaded.
-func (d *Dispenser) RemainingArgs() []string {
- var args []string
-
- for d.NextArg() {
- if d.Val() == "{" {
- d.cursor--
- break
- }
- args = append(args, d.Val())
- }
-
- return args
-}
-
-// ArgErr returns an argument error, meaning that another
-// argument was expected but not found. In other words,
-// a line break or open curly brace was encountered instead of
-// an argument.
-func (d *Dispenser) ArgErr() error {
- if d.Val() == "{" {
- return d.Err("Unexpected token '{', expecting argument")
- }
- return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val())
-}
-
-// SyntaxErr creates a generic syntax error which explains what was
-// found and what was expected.
-func (d *Dispenser) SyntaxErr(expected string) error {
- msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected)
- return errors.New(msg)
-}
-
-// EOFErr returns an error indicating that the dispenser reached
-// the end of the input when searching for the next token.
-func (d *Dispenser) EOFErr() error {
- return d.Errf("Unexpected EOF")
-}
-
-// Err generates a custom parse error with a message of msg.
-func (d *Dispenser) Err(msg string) error {
- msg = fmt.Sprintf("%s:%d - Parse error: %s", d.File(), d.Line(), msg)
- return errors.New(msg)
-}
-
-// Errf is like Err, but for formatted error messages
-func (d *Dispenser) Errf(format string, args ...interface{}) error {
- return d.Err(fmt.Sprintf(format, args...))
-}
-
-// numLineBreaks counts how many line breaks are in the token
-// value given by the token index tknIdx. It returns 0 if the
-// token does not exist or there are no line breaks.
-func (d *Dispenser) numLineBreaks(tknIdx int) int {
- if tknIdx < 0 || tknIdx >= len(d.tokens) {
- return 0
- }
- return strings.Count(d.tokens[tknIdx].Text, "\n")
-}
-
-// isNewLine determines whether the current token is on a different
-// line (higher line number) than the previous token. It handles imported
-// tokens correctly. If there isn't a previous token, it returns true.
-func (d *Dispenser) isNewLine() bool {
- if d.cursor < 1 {
- return true
- }
- if d.cursor > len(d.tokens)-1 {
- return false
- }
- return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File ||
- d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line
-}
diff --git a/caddyfile/json.go b/caddyfile/json.go
deleted file mode 100644
index 16aab4e3d29..00000000000
--- a/caddyfile/json.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package caddyfile
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "sort"
- "strconv"
- "strings"
-)
-
-const filename = "Caddyfile"
-
-// ToJSON converts caddyfile to its JSON representation.
-func ToJSON(caddyfile []byte) ([]byte, error) {
- var j EncodedCaddyfile
-
- serverBlocks, err := Parse(filename, bytes.NewReader(caddyfile), nil)
- if err != nil {
- return nil, err
- }
-
- for _, sb := range serverBlocks {
- block := EncodedServerBlock{
- Keys: sb.Keys,
- Body: [][]interface{}{},
- }
-
- // Extract directives deterministically by sorting them
- var directives = make([]string, len(sb.Tokens))
- for dir := range sb.Tokens {
- directives = append(directives, dir)
- }
- sort.Strings(directives)
-
- // Convert each directive's tokens into our JSON structure
- for _, dir := range directives {
- disp := NewDispenserTokens(filename, sb.Tokens[dir])
- for disp.Next() {
- block.Body = append(block.Body, constructLine(&disp))
- }
- }
-
- // tack this block onto the end of the list
- j = append(j, block)
- }
-
- result, err := json.Marshal(j)
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// constructLine transforms tokens into a JSON-encodable structure;
-// but only one line at a time, to be used at the top-level of
-// a server block only (where the first token on each line is a
-// directive) - not to be used at any other nesting level.
-func constructLine(d *Dispenser) []interface{} {
- var args []interface{}
-
- args = append(args, d.Val())
-
- for d.NextArg() {
- if d.Val() == "{" {
- args = append(args, constructBlock(d))
- continue
- }
- args = append(args, d.Val())
- }
-
- return args
-}
-
-// constructBlock recursively processes tokens into a
-// JSON-encodable structure. To be used in a directive's
-// block. Goes to end of block.
-func constructBlock(d *Dispenser) [][]interface{} {
- block := [][]interface{}{}
-
- for d.Next() {
- if d.Val() == "}" {
- break
- }
- block = append(block, constructLine(d))
- }
-
- return block
-}
-
-// FromJSON converts JSON-encoded jsonBytes to Caddyfile text
-func FromJSON(jsonBytes []byte) ([]byte, error) {
- var j EncodedCaddyfile
- var result string
-
- err := json.Unmarshal(jsonBytes, &j)
- if err != nil {
- return nil, err
- }
-
- for sbPos, sb := range j {
- if sbPos > 0 {
- result += "\n\n"
- }
- for i, key := range sb.Keys {
- if i > 0 {
- result += ", "
- }
- //result += standardizeScheme(key)
- result += key
- }
- result += jsonToText(sb.Body, 1)
- }
-
- return []byte(result), nil
-}
-
-// jsonToText recursively transforms a scope of JSON into plain
-// Caddyfile text.
-func jsonToText(scope interface{}, depth int) string {
- var result string
-
- switch val := scope.(type) {
- case string:
- if strings.ContainsAny(val, "\" \n\t\r") {
- result += `"` + strings.Replace(val, "\"", "\\\"", -1) + `"`
- } else {
- result += val
- }
- case int:
- result += strconv.Itoa(val)
- case float64:
- result += fmt.Sprintf("%v", val)
- case bool:
- result += fmt.Sprintf("%t", val)
- case [][]interface{}:
- result += " {\n"
- for _, arg := range val {
- result += strings.Repeat("\t", depth) + jsonToText(arg, depth+1) + "\n"
- }
- result += strings.Repeat("\t", depth-1) + "}"
- case []interface{}:
- for i, v := range val {
- if block, ok := v.([]interface{}); ok {
- result += "{\n"
- for _, arg := range block {
- result += strings.Repeat("\t", depth) + jsonToText(arg, depth+1) + "\n"
- }
- result += strings.Repeat("\t", depth-1) + "}"
- continue
- }
- result += jsonToText(v, depth)
- if i < len(val)-1 {
- result += " "
- }
- }
- }
-
- return result
-}
-
-// TODO: Will this function come in handy somewhere else?
-/*
-// standardizeScheme turns an address like host:https into https://host,
-// or "host:" into "host".
-func standardizeScheme(addr string) string {
- if hostname, port, err := net.SplitHostPort(addr); err == nil {
- if port == "http" || port == "https" {
- addr = port + "://" + hostname
- }
- }
- return strings.TrimSuffix(addr, ":")
-}
-*/
-
-// EncodedCaddyfile encapsulates a slice of EncodedServerBlocks.
-type EncodedCaddyfile []EncodedServerBlock
-
-// EncodedServerBlock represents a server block ripe for encoding.
-type EncodedServerBlock struct {
- Keys []string `json:"keys"`
- Body [][]interface{} `json:"body"`
-}
diff --git a/caddyfile/json_test.go b/caddyfile/json_test.go
deleted file mode 100644
index 97d553c3368..00000000000
--- a/caddyfile/json_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package caddyfile
-
-import "testing"
-
-var tests = []struct {
- caddyfile, json string
-}{
- { // 0
- caddyfile: `foo {
- root /bar
-}`,
- json: `[{"keys":["foo"],"body":[["root","/bar"]]}]`,
- },
- { // 1
- caddyfile: `host1, host2 {
- dir {
- def
- }
-}`,
- json: `[{"keys":["host1","host2"],"body":[["dir",[["def"]]]]}]`,
- },
- { // 2
- caddyfile: `host1, host2 {
- dir abc {
- def ghi
- jkl
- }
-}`,
- json: `[{"keys":["host1","host2"],"body":[["dir","abc",[["def","ghi"],["jkl"]]]]}]`,
- },
- { // 3
- caddyfile: `host1:1234, host2:5678 {
- dir abc {
- }
-}`,
- json: `[{"keys":["host1:1234","host2:5678"],"body":[["dir","abc",[]]]}]`,
- },
- { // 4
- caddyfile: `host {
- foo "bar baz"
-}`,
- json: `[{"keys":["host"],"body":[["foo","bar baz"]]}]`,
- },
- { // 5
- caddyfile: `host, host:80 {
- foo "bar \"baz\""
-}`,
- json: `[{"keys":["host","host:80"],"body":[["foo","bar \"baz\""]]}]`,
- },
- { // 6
- caddyfile: `host {
- foo "bar
-baz"
-}`,
- json: `[{"keys":["host"],"body":[["foo","bar\nbaz"]]}]`,
- },
- { // 7
- caddyfile: `host {
- dir 123 4.56 true
-}`,
- json: `[{"keys":["host"],"body":[["dir","123","4.56","true"]]}]`, // NOTE: I guess we assume numbers and booleans should be encoded as strings...?
- },
- { // 8
- caddyfile: `http://host, https://host {
-}`,
- json: `[{"keys":["http://host","https://host"],"body":[]}]`, // hosts in JSON are always host:port format (if port is specified), for consistency
- },
- { // 9
- caddyfile: `host {
- dir1 a b
- dir2 c d
-}`,
- json: `[{"keys":["host"],"body":[["dir1","a","b"],["dir2","c","d"]]}]`,
- },
- { // 10
- caddyfile: `host {
- dir a b
- dir c d
-}`,
- json: `[{"keys":["host"],"body":[["dir","a","b"],["dir","c","d"]]}]`,
- },
- { // 11
- caddyfile: `host {
- dir1 a b
- dir2 {
- c
- d
- }
-}`,
- json: `[{"keys":["host"],"body":[["dir1","a","b"],["dir2",[["c"],["d"]]]]}]`,
- },
- { // 12
- caddyfile: `host1 {
- dir1
-}
-
-host2 {
- dir2
-}`,
- json: `[{"keys":["host1"],"body":[["dir1"]]},{"keys":["host2"],"body":[["dir2"]]}]`,
- },
-}
-
-func TestToJSON(t *testing.T) {
- for i, test := range tests {
- output, err := ToJSON([]byte(test.caddyfile))
- if err != nil {
- t.Errorf("Test %d: %v", i, err)
- }
- if string(output) != test.json {
- t.Errorf("Test %d\nExpected:\n'%s'\nActual:\n'%s'", i, test.json, string(output))
- }
- }
-}
-
-func TestFromJSON(t *testing.T) {
- for i, test := range tests {
- output, err := FromJSON([]byte(test.json))
- if err != nil {
- t.Errorf("Test %d: %v", i, err)
- }
- if string(output) != test.caddyfile {
- t.Errorf("Test %d\nExpected:\n'%s'\nActual:\n'%s'", i, test.caddyfile, string(output))
- }
- }
-}
-
-// TODO: Will these tests come in handy somewhere else?
-/*
-func TestStandardizeAddress(t *testing.T) {
- // host:https should be converted to https://host
- output, err := ToJSON([]byte(`host:https`))
- if err != nil {
- t.Fatal(err)
- }
- if expected, actual := `[{"keys":["https://host"],"body":[]}]`, string(output); expected != actual {
- t.Errorf("Expected:\n'%s'\nActual:\n'%s'", expected, actual)
- }
-
- output, err = FromJSON([]byte(`[{"keys":["https://host"],"body":[]}]`))
- if err != nil {
- t.Fatal(err)
- }
- if expected, actual := "https://host {\n}", string(output); expected != actual {
- t.Errorf("Expected:\n'%s'\nActual:\n'%s'", expected, actual)
- }
-
- // host: should be converted to just host
- output, err = ToJSON([]byte(`host:`))
- if err != nil {
- t.Fatal(err)
- }
- if expected, actual := `[{"keys":["host"],"body":[]}]`, string(output); expected != actual {
- t.Errorf("Expected:\n'%s'\nActual:\n'%s'", expected, actual)
- }
- output, err = FromJSON([]byte(`[{"keys":["host:"],"body":[]}]`))
- if err != nil {
- t.Fatal(err)
- }
- if expected, actual := "host {\n}", string(output); expected != actual {
- t.Errorf("Expected:\n'%s'\nActual:\n'%s'", expected, actual)
- }
-}
-*/
diff --git a/caddyfile/lexer.go b/caddyfile/lexer.go
deleted file mode 100644
index ea3bad99931..00000000000
--- a/caddyfile/lexer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package caddyfile
-
-import (
- "bufio"
- "io"
- "unicode"
-)
-
-type (
- // lexer is a utility which can get values, token by
- // token, from a Reader. A token is a word, and tokens
- // are separated by whitespace. A word can be enclosed
- // in quotes if it contains whitespace.
- lexer struct {
- reader *bufio.Reader
- token Token
- line int
- }
-
- // Token represents a single parsable unit.
- Token struct {
- File string
- Line int
- Text string
- }
-)
-
-// load prepares the lexer to scan an input for tokens.
-// It discards any leading byte order mark.
-func (l *lexer) load(input io.Reader) error {
- l.reader = bufio.NewReader(input)
- l.line = 1
-
- // discard byte order mark, if present
- firstCh, _, err := l.reader.ReadRune()
- if err != nil {
- return err
- }
- if firstCh != 0xFEFF {
- err := l.reader.UnreadRune()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// next loads the next token into the lexer.
-// A token is delimited by whitespace, unless
-// the token starts with a quotes character (")
-// in which case the token goes until the closing
-// quotes (the enclosing quotes are not included).
-// Inside quoted strings, quotes may be escaped
-// with a preceding \ character. No other chars
-// may be escaped. The rest of the line is skipped
-// if a "#" character is read in. Returns true if
-// a token was loaded; false otherwise.
-func (l *lexer) next() bool {
- var val []rune
- var comment, quoted, escaped bool
-
- makeToken := func() bool {
- l.token.Text = string(val)
- return true
- }
-
- for {
- ch, _, err := l.reader.ReadRune()
- if err != nil {
- if len(val) > 0 {
- return makeToken()
- }
- if err == io.EOF {
- return false
- }
- panic(err)
- }
-
- if quoted {
- if !escaped {
- if ch == '\\' {
- escaped = true
- continue
- } else if ch == '"' {
- quoted = false
- return makeToken()
- }
- }
- if ch == '\n' {
- l.line++
- }
- if escaped {
- // only escape quotes
- if ch != '"' {
- val = append(val, '\\')
- }
- }
- val = append(val, ch)
- escaped = false
- continue
- }
-
- if unicode.IsSpace(ch) {
- if ch == '\r' {
- continue
- }
- if ch == '\n' {
- l.line++
- comment = false
- }
- if len(val) > 0 {
- return makeToken()
- }
- continue
- }
-
- if ch == '#' {
- comment = true
- }
-
- if comment {
- continue
- }
-
- if len(val) == 0 {
- l.token = Token{Line: l.line}
- if ch == '"' {
- quoted = true
- continue
- }
- }
-
- val = append(val, ch)
- }
-}
diff --git a/caddyfile/lexer_test.go b/caddyfile/lexer_test.go
deleted file mode 100644
index 2a0b175c36e..00000000000
--- a/caddyfile/lexer_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package caddyfile
-
-import (
- "strings"
- "testing"
-)
-
-type lexerTestCase struct {
- input string
- expected []Token
-}
-
-func TestLexer(t *testing.T) {
- testCases := []lexerTestCase{
- {
- input: `host:123`,
- expected: []Token{
- {Line: 1, Text: "host:123"},
- },
- },
- {
- input: `host:123
-
- directive`,
- expected: []Token{
- {Line: 1, Text: "host:123"},
- {Line: 3, Text: "directive"},
- },
- },
- {
- input: `host:123 {
- directive
- }`,
- expected: []Token{
- {Line: 1, Text: "host:123"},
- {Line: 1, Text: "{"},
- {Line: 2, Text: "directive"},
- {Line: 3, Text: "}"},
- },
- },
- {
- input: `host:123 { directive }`,
- expected: []Token{
- {Line: 1, Text: "host:123"},
- {Line: 1, Text: "{"},
- {Line: 1, Text: "directive"},
- {Line: 1, Text: "}"},
- },
- },
- {
- input: `host:123 {
- #comment
- directive
- # comment
- foobar # another comment
- }`,
- expected: []Token{
- {Line: 1, Text: "host:123"},
- {Line: 1, Text: "{"},
- {Line: 3, Text: "directive"},
- {Line: 5, Text: "foobar"},
- {Line: 6, Text: "}"},
- },
- },
- {
- input: `a "quoted value" b
- foobar`,
- expected: []Token{
- {Line: 1, Text: "a"},
- {Line: 1, Text: "quoted value"},
- {Line: 1, Text: "b"},
- {Line: 2, Text: "foobar"},
- },
- },
- {
- input: `A "quoted \"value\" inside" B`,
- expected: []Token{
- {Line: 1, Text: "A"},
- {Line: 1, Text: `quoted "value" inside`},
- {Line: 1, Text: "B"},
- },
- },
- {
- input: `"don't\escape"`,
- expected: []Token{
- {Line: 1, Text: `don't\escape`},
- },
- },
- {
- input: `"don't\\escape"`,
- expected: []Token{
- {Line: 1, Text: `don't\\escape`},
- },
- },
- {
- input: `A "quoted value with line
- break inside" {
- foobar
- }`,
- expected: []Token{
- {Line: 1, Text: "A"},
- {Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"},
- {Line: 2, Text: "{"},
- {Line: 3, Text: "foobar"},
- {Line: 4, Text: "}"},
- },
- },
- {
- input: `"C:\php\php-cgi.exe"`,
- expected: []Token{
- {Line: 1, Text: `C:\php\php-cgi.exe`},
- },
- },
- {
- input: `empty "" string`,
- expected: []Token{
- {Line: 1, Text: `empty`},
- {Line: 1, Text: ``},
- {Line: 1, Text: `string`},
- },
- },
- {
- input: "skip those\r\nCR characters",
- expected: []Token{
- {Line: 1, Text: "skip"},
- {Line: 1, Text: "those"},
- {Line: 2, Text: "CR"},
- {Line: 2, Text: "characters"},
- },
- },
- {
- input: "\xEF\xBB\xBF:8080", // test with leading byte order mark
- expected: []Token{
- {Line: 1, Text: ":8080"},
- },
- },
- }
-
- for i, testCase := range testCases {
- actual := tokenize(testCase.input)
- lexerCompare(t, i, testCase.expected, actual)
- }
-}
-
-func tokenize(input string) (tokens []Token) {
- l := lexer{}
- l.load(strings.NewReader(input))
- for l.next() {
- tokens = append(tokens, l.token)
- }
- return
-}
-
-func lexerCompare(t *testing.T, n int, expected, actual []Token) {
- if len(expected) != len(actual) {
- t.Errorf("Test case %d: expected %d token(s) but got %d", n, len(expected), len(actual))
- }
-
- for i := 0; i < len(actual) && i < len(expected); i++ {
- if actual[i].Line != expected[i].Line {
- t.Errorf("Test case %d token %d ('%s'): expected line %d but was line %d",
- n, i, expected[i].Text, expected[i].Line, actual[i].Line)
- break
- }
- if actual[i].Text != expected[i].Text {
- t.Errorf("Test case %d token %d: expected text '%s' but was '%s'",
- n, i, expected[i].Text, actual[i].Text)
- break
- }
- }
-}
diff --git a/caddyfile/parse.go b/caddyfile/parse.go
deleted file mode 100644
index 13b23db05be..00000000000
--- a/caddyfile/parse.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package caddyfile
-
-import (
- "io"
- "log"
- "os"
- "path/filepath"
- "strings"
-)
-
-// Parse parses the input just enough to group tokens, in
-// order, by server block. No further parsing is performed.
-// Server blocks are returned in the order in which they appear.
-// Directives that do not appear in validDirectives will cause
-// an error. If you do not want to check for valid directives,
-// pass in nil instead.
-func Parse(filename string, input io.Reader, validDirectives []string) ([]ServerBlock, error) {
- p := parser{Dispenser: NewDispenser(filename, input), validDirectives: validDirectives}
- return p.parseAll()
-}
-
-// allTokens lexes the entire input, but does not parse it.
-// It returns all the tokens from the input, unstructured
-// and in order.
-func allTokens(input io.Reader) ([]Token, error) {
- l := new(lexer)
- err := l.load(input)
- if err != nil {
- return nil, err
- }
- var tokens []Token
- for l.next() {
- tokens = append(tokens, l.token)
- }
- return tokens, nil
-}
-
-type parser struct {
- Dispenser
- block ServerBlock // current server block being parsed
- validDirectives []string // a directive must be valid or it's an error
- eof bool // if we encounter a valid EOF in a hard place
-}
-
-func (p *parser) parseAll() ([]ServerBlock, error) {
- var blocks []ServerBlock
-
- for p.Next() {
- err := p.parseOne()
- if err != nil {
- return blocks, err
- }
- if len(p.block.Keys) > 0 {
- blocks = append(blocks, p.block)
- }
- }
-
- return blocks, nil
-}
-
-func (p *parser) parseOne() error {
- p.block = ServerBlock{Tokens: make(map[string][]Token)}
-
- return p.begin()
-}
-
-func (p *parser) begin() error {
- if len(p.tokens) == 0 {
- return nil
- }
-
- err := p.addresses()
-
- if err != nil {
- return err
- }
-
- if p.eof {
- // this happens if the Caddyfile consists of only
- // a line of addresses and nothing else
- return nil
- }
-
- return p.blockContents()
-}
-
-func (p *parser) addresses() error {
- var expectingAnother bool
-
- for {
- tkn := replaceEnvVars(p.Val())
-
- // special case: import directive replaces tokens during parse-time
- if tkn == "import" && p.isNewLine() {
- err := p.doImport()
- if err != nil {
- return err
- }
- continue
- }
-
- // Open brace definitely indicates end of addresses
- if tkn == "{" {
- if expectingAnother {
- return p.Errf("Expected another address but had '%s' - check for extra comma", tkn)
- }
- break
- }
-
- if tkn != "" { // empty token possible if user typed ""
- // Trailing comma indicates another address will follow, which
- // may possibly be on the next line
- if tkn[len(tkn)-1] == ',' {
- tkn = tkn[:len(tkn)-1]
- expectingAnother = true
- } else {
- expectingAnother = false // but we may still see another one on this line
- }
-
- p.block.Keys = append(p.block.Keys, tkn)
- }
-
- // Advance token and possibly break out of loop or return error
- hasNext := p.Next()
- if expectingAnother && !hasNext {
- return p.EOFErr()
- }
- if !hasNext {
- p.eof = true
- break // EOF
- }
- if !expectingAnother && p.isNewLine() {
- break
- }
- }
-
- return nil
-}
-
-func (p *parser) blockContents() error {
- errOpenCurlyBrace := p.openCurlyBrace()
- if errOpenCurlyBrace != nil {
- // single-server configs don't need curly braces
- p.cursor--
- }
-
- err := p.directives()
- if err != nil {
- return err
- }
-
- // Only look for close curly brace if there was an opening
- if errOpenCurlyBrace == nil {
- err = p.closeCurlyBrace()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// directives parses through all the lines for directives
-// and it expects the next token to be the first
-// directive. It goes until EOF or closing curly brace
-// which ends the server block.
-func (p *parser) directives() error {
- for p.Next() {
- // end of server block
- if p.Val() == "}" {
- break
- }
-
- // special case: import directive replaces tokens during parse-time
- if p.Val() == "import" {
- err := p.doImport()
- if err != nil {
- return err
- }
- p.cursor-- // cursor is advanced when we continue, so roll back one more
- continue
- }
-
- // normal case: parse a directive on this line
- if err := p.directive(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// doImport swaps out the import directive and its argument
-// (a total of 2 tokens) with the tokens in the specified file
-// or globbing pattern. When the function returns, the cursor
-// is on the token before where the import directive was. In
-// other words, call Next() to access the first token that was
-// imported.
-func (p *parser) doImport() error {
- // syntax checks
- if !p.NextArg() {
- return p.ArgErr()
- }
- importPattern := replaceEnvVars(p.Val())
- if importPattern == "" {
- return p.Err("Import requires a non-empty filepath")
- }
- if p.NextArg() {
- return p.Err("Import takes only one argument (glob pattern or file)")
- }
-
- // make path relative to Caddyfile rather than current working directory (issue #867)
- // and then use glob to get list of matching filenames
- absFile, err := filepath.Abs(p.Dispenser.filename)
- if err != nil {
- return p.Errf("Failed to get absolute path of file: %s", p.Dispenser.filename)
- }
-
- var matches []string
- var globPattern string
- if !filepath.IsAbs(importPattern) {
- globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
- } else {
- globPattern = importPattern
- }
- matches, err = filepath.Glob(globPattern)
-
- if err != nil {
- return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
- }
- if len(matches) == 0 {
- if strings.Contains(globPattern, "*") {
- log.Printf("[WARNING] No files matching import pattern: %s", importPattern)
- } else {
- return p.Errf("File to import not found: %s", importPattern)
- }
- }
-
- // splice out the import directive and its argument (2 tokens total)
- tokensBefore := p.tokens[:p.cursor-1]
- tokensAfter := p.tokens[p.cursor+1:]
-
- // collect all the imported tokens
- var importedTokens []Token
- for _, importFile := range matches {
- newTokens, err := p.doSingleImport(importFile)
- if err != nil {
- return err
- }
- var importLine int
- importDir := filepath.Dir(importFile)
- for i, token := range newTokens {
- if token.Text == "import" {
- importLine = token.Line
- continue
- }
- if token.Line == importLine {
- var abs string
- if filepath.IsAbs(token.Text) {
- abs = token.Text
- } else if !filepath.IsAbs(importFile) {
- abs = filepath.Join(filepath.Dir(absFile), token.Text)
- } else {
- abs = filepath.Join(importDir, token.Text)
- }
- newTokens[i] = Token{
- Text: abs,
- Line: token.Line,
- File: token.File,
- }
- }
- }
- importedTokens = append(importedTokens, newTokens...)
- }
-
- // splice the imported tokens in the place of the import statement
- // and rewind cursor so Next() will land on first imported token
- p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...)
- p.cursor--
-
- return nil
-}
-
-// doSingleImport lexes the individual file at importFile and returns
-// its tokens or an error, if any.
-func (p *parser) doSingleImport(importFile string) ([]Token, error) {
- file, err := os.Open(importFile)
- if err != nil {
- return nil, p.Errf("Could not import %s: %v", importFile, err)
- }
- defer file.Close()
-
- if info, err := file.Stat(); err != nil {
- return nil, p.Errf("Could not import %s: %v", importFile, err)
- } else if info.IsDir() {
- return nil, p.Errf("Could not import %s: is a directory", importFile)
- }
-
- importedTokens, err := allTokens(file)
- if err != nil {
- return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
- }
-
- // Tack the filename onto these tokens so errors show the imported file's name
- filename := filepath.Base(importFile)
- for i := 0; i < len(importedTokens); i++ {
- importedTokens[i].File = filename
- }
-
- return importedTokens, nil
-}
-
-// directive collects tokens until the directive's scope
-// closes (either end of line or end of curly brace block).
-// It expects the currently-loaded token to be a directive
-// (or } that ends a server block). The collected tokens
-// are loaded into the current server block for later use
-// by directive setup functions.
-func (p *parser) directive() error {
- dir := p.Val()
- nesting := 0
-
- // TODO: More helpful error message ("did you mean..." or "maybe you need to install its server type")
- if !p.validDirective(dir) {
- return p.Errf("Unknown directive '%s'", dir)
- }
-
- // The directive itself is appended as a relevant token
- p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
-
- for p.Next() {
- if p.Val() == "{" {
- nesting++
- } else if p.isNewLine() && nesting == 0 {
- p.cursor-- // read too far
- break
- } else if p.Val() == "}" && nesting > 0 {
- nesting--
- } else if p.Val() == "}" && nesting == 0 {
- return p.Err("Unexpected '}' because no matching opening brace")
- }
- p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
- p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
- }
-
- if nesting > 0 {
- return p.EOFErr()
- }
- return nil
-}
-
-// openCurlyBrace expects the current token to be an
-// opening curly brace. This acts like an assertion
-// because it returns an error if the token is not
-// a opening curly brace. It does NOT advance the token.
-func (p *parser) openCurlyBrace() error {
- if p.Val() != "{" {
- return p.SyntaxErr("{")
- }
- return nil
-}
-
-// closeCurlyBrace expects the current token to be
-// a closing curly brace. This acts like an assertion
-// because it returns an error if the token is not
-// a closing curly brace. It does NOT advance the token.
-func (p *parser) closeCurlyBrace() error {
- if p.Val() != "}" {
- return p.SyntaxErr("}")
- }
- return nil
-}
-
-// validDirective returns true if dir is in p.validDirectives.
-func (p *parser) validDirective(dir string) bool {
- if p.validDirectives == nil {
- return true
- }
- for _, d := range p.validDirectives {
- if d == dir {
- return true
- }
- }
- return false
-}
-
-// replaceEnvVars replaces environment variables that appear in the token
-// and understands both the $UNIX and %WINDOWS% syntaxes.
-func replaceEnvVars(s string) string {
- s = replaceEnvReferences(s, "{%", "%}")
- s = replaceEnvReferences(s, "{$", "}")
- return s
-}
-
-// replaceEnvReferences performs the actual replacement of env variables
-// in s, given the placeholder start and placeholder end strings.
-func replaceEnvReferences(s, refStart, refEnd string) string {
- index := strings.Index(s, refStart)
- for index != -1 {
- endIndex := strings.Index(s, refEnd)
- if endIndex != -1 {
- ref := s[index : endIndex+len(refEnd)]
- s = strings.Replace(s, ref, os.Getenv(ref[len(refStart):len(ref)-len(refEnd)]), -1)
- } else {
- return s
- }
- index = strings.Index(s, refStart)
- }
- return s
-}
-
-// ServerBlock associates any number of keys (usually addresses
-// of some sort) with tokens (grouped by directive name).
-type ServerBlock struct {
- Keys []string
- Tokens map[string][]Token
-}
diff --git a/caddyfile/parse_test.go b/caddyfile/parse_test.go
deleted file mode 100644
index ad7cfa98377..00000000000
--- a/caddyfile/parse_test.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package caddyfile
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-)
-
-func TestAllTokens(t *testing.T) {
- input := strings.NewReader("a b c\nd e")
- expected := []string{"a", "b", "c", "d", "e"}
- tokens, err := allTokens(input)
-
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- if len(tokens) != len(expected) {
- t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens))
- }
-
- for i, val := range expected {
- if tokens[i].Text != val {
- t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text)
- }
- }
-}
-
-func TestParseOneAndImport(t *testing.T) {
- testParseOne := func(input string) (ServerBlock, error) {
- p := testParser(input)
- p.Next() // parseOne doesn't call Next() to start, so we must
- err := p.parseOne()
- return p.block, err
- }
-
- for i, test := range []struct {
- input string
- shouldErr bool
- keys []string
- tokens map[string]int // map of directive name to number of tokens expected
- }{
- {`localhost`, false, []string{
- "localhost",
- }, map[string]int{}},
-
- {`localhost
- dir1`, false, []string{
- "localhost",
- }, map[string]int{
- "dir1": 1,
- }},
-
- {`localhost:1234
- dir1 foo bar`, false, []string{
- "localhost:1234",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`localhost {
- dir1
- }`, false, []string{
- "localhost",
- }, map[string]int{
- "dir1": 1,
- }},
-
- {`localhost:1234 {
- dir1 foo bar
- dir2
- }`, false, []string{
- "localhost:1234",
- }, map[string]int{
- "dir1": 3,
- "dir2": 1,
- }},
-
- {`http://localhost https://localhost
- dir1 foo bar`, false, []string{
- "http://localhost",
- "https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`http://localhost https://localhost {
- dir1 foo bar
- }`, false, []string{
- "http://localhost",
- "https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`http://localhost, https://localhost {
- dir1 foo bar
- }`, false, []string{
- "http://localhost",
- "https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`http://localhost, {
- }`, true, []string{
- "http://localhost",
- }, map[string]int{}},
-
- {`host1:80, http://host2.com
- dir1 foo bar
- dir2 baz`, false, []string{
- "host1:80",
- "http://host2.com",
- }, map[string]int{
- "dir1": 3,
- "dir2": 2,
- }},
-
- {`http://host1.com,
- http://host2.com,
- https://host3.com`, false, []string{
- "http://host1.com",
- "http://host2.com",
- "https://host3.com",
- }, map[string]int{}},
-
- {`http://host1.com:1234, https://host2.com
- dir1 foo {
- bar baz
- }
- dir2`, false, []string{
- "http://host1.com:1234",
- "https://host2.com",
- }, map[string]int{
- "dir1": 6,
- "dir2": 1,
- }},
-
- {`127.0.0.1
- dir1 {
- bar baz
- }
- dir2 {
- foo bar
- }`, false, []string{
- "127.0.0.1",
- }, map[string]int{
- "dir1": 5,
- "dir2": 5,
- }},
-
- {`localhost
- dir1 {
- foo`, true, []string{
- "localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`localhost
- dir1 {
- }`, false, []string{
- "localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`localhost
- dir1 {
- } }`, true, []string{
- "localhost",
- }, map[string]int{
- "dir1": 3,
- }},
-
- {`localhost
- dir1 {
- nested {
- foo
- }
- }
- dir2 foo bar`, false, []string{
- "localhost",
- }, map[string]int{
- "dir1": 7,
- "dir2": 3,
- }},
-
- {``, false, []string{}, map[string]int{}},
-
- {`localhost
- dir1 arg1
- import testdata/import_test1.txt`, false, []string{
- "localhost",
- }, map[string]int{
- "dir1": 2,
- "dir2": 3,
- "dir3": 1,
- }},
-
- {`import testdata/import_test2.txt`, false, []string{
- "host1",
- }, map[string]int{
- "dir1": 1,
- "dir2": 2,
- }},
-
- {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, map[string]int{}},
-
- {`import testdata/not_found.txt`, true, []string{}, map[string]int{}},
-
- {`""`, false, []string{}, map[string]int{}},
-
- {``, false, []string{}, map[string]int{}},
- } {
- result, err := testParseOne(test.input)
-
- if test.shouldErr && err == nil {
- t.Errorf("Test %d: Expected an error, but didn't get one", i)
- }
- if !test.shouldErr && err != nil {
- t.Errorf("Test %d: Expected no error, but got: %v", i, err)
- }
-
- if len(result.Keys) != len(test.keys) {
- t.Errorf("Test %d: Expected %d keys, got %d",
- i, len(test.keys), len(result.Keys))
- continue
- }
- for j, addr := range result.Keys {
- if addr != test.keys[j] {
- t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
- i, j, test.keys[j], addr)
- }
- }
-
- if len(result.Tokens) != len(test.tokens) {
- t.Errorf("Test %d: Expected %d directives, had %d",
- i, len(test.tokens), len(result.Tokens))
- continue
- }
- for directive, tokens := range result.Tokens {
- if len(tokens) != test.tokens[directive] {
- t.Errorf("Test %d, directive '%s': Expected %d tokens, counted %d",
- i, directive, test.tokens[directive], len(tokens))
- continue
- }
- }
- }
-}
-
-func TestRecursiveImport(t *testing.T) {
- testParseOne := func(input string) (ServerBlock, error) {
- p := testParser(input)
- p.Next() // parseOne doesn't call Next() to start, so we must
- err := p.parseOne()
- return p.block, err
- }
-
- isExpected := func(got ServerBlock) bool {
- if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
- t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
- return false
- }
- if len(got.Tokens) != 2 {
- t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens))
- return false
- }
- if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["dir2"]) != 2 {
- t.Errorf("got unexpect tokens: %v", got.Tokens)
- return false
- }
- return true
- }
-
- recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1")
- if err != nil {
- t.Fatal(err)
- }
- recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2")
- if err != nil {
- t.Fatal(err)
- }
-
- // test relative recursive import
- err = ioutil.WriteFile(recursiveFile1, []byte(
- `localhost
- dir1
- import recursive_import_test2`), 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(recursiveFile1)
-
- err = ioutil.WriteFile(recursiveFile2, []byte("dir2 1"), 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(recursiveFile2)
-
- // import absolute path
- result, err := testParseOne("import " + recursiveFile1)
- if err != nil {
- t.Fatal(err)
- }
- if !isExpected(result) {
- t.Error("absolute+relative import failed")
- }
-
- // import relative path
- result, err = testParseOne("import testdata/recursive_import_test1")
- if err != nil {
- t.Fatal(err)
- }
- if !isExpected(result) {
- t.Error("relative+relative import failed")
- }
-
- // test absolute recursive import
- err = ioutil.WriteFile(recursiveFile1, []byte(
- `localhost
- dir1
- import `+recursiveFile2), 0644)
- if err != nil {
- t.Fatal(err)
- }
-
- // import absolute path
- result, err = testParseOne("import " + recursiveFile1)
- if err != nil {
- t.Fatal(err)
- }
- if !isExpected(result) {
- t.Error("absolute+absolute import failed")
- }
-
- // import relative path
- result, err = testParseOne("import testdata/recursive_import_test1")
- if err != nil {
- t.Fatal(err)
- }
- if !isExpected(result) {
- t.Error("relative+absolute import failed")
- }
-}
-
-func TestParseAll(t *testing.T) {
- for i, test := range []struct {
- input string
- shouldErr bool
- keys [][]string // keys per server block, in order
- }{
- {`localhost`, false, [][]string{
- {"localhost"},
- }},
-
- {`localhost:1234`, false, [][]string{
- {"localhost:1234"},
- }},
-
- {`localhost:1234 {
- }
- localhost:2015 {
- }`, false, [][]string{
- {"localhost:1234"},
- {"localhost:2015"},
- }},
-
- {`localhost:1234, http://host2`, false, [][]string{
- {"localhost:1234", "http://host2"},
- }},
-
- {`localhost:1234, http://host2,`, true, [][]string{}},
-
- {`http://host1.com, http://host2.com {
- }
- https://host3.com, https://host4.com {
- }`, false, [][]string{
- {"http://host1.com", "http://host2.com"},
- {"https://host3.com", "https://host4.com"},
- }},
-
- {`import testdata/import_glob*.txt`, false, [][]string{
- {"glob0.host0"},
- {"glob0.host1"},
- {"glob1.host0"},
- {"glob2.host0"},
- }},
-
- {`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches
- {`import notfound/file.conf`, true, [][]string{}}, // but a specific file should
- } {
- p := testParser(test.input)
- blocks, err := p.parseAll()
-
- if test.shouldErr && err == nil {
- t.Errorf("Test %d: Expected an error, but didn't get one", i)
- }
- if !test.shouldErr && err != nil {
- t.Errorf("Test %d: Expected no error, but got: %v", i, err)
- }
-
- if len(blocks) != len(test.keys) {
- t.Errorf("Test %d: Expected %d server blocks, got %d",
- i, len(test.keys), len(blocks))
- continue
- }
- for j, block := range blocks {
- if len(block.Keys) != len(test.keys[j]) {
- t.Errorf("Test %d: Expected %d keys in block %d, got %d",
- i, len(test.keys[j]), j, len(block.Keys))
- continue
- }
- for k, addr := range block.Keys {
- if addr != test.keys[j][k] {
- t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
- i, j, k, test.keys[j][k], addr)
- }
- }
- }
- }
-}
-
-func TestEnvironmentReplacement(t *testing.T) {
- os.Setenv("PORT", "8080")
- os.Setenv("ADDRESS", "servername.com")
- os.Setenv("FOOBAR", "foobar")
-
- // basic test; unix-style env vars
- p := testParser(`{$ADDRESS}`)
- blocks, _ := p.parseAll()
- if actual, expected := blocks[0].Keys[0], "servername.com"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
-
- // multiple vars per token
- p = testParser(`{$ADDRESS}:{$PORT}`)
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
-
- // windows-style var and unix style in same token
- p = testParser(`{%ADDRESS%}:{$PORT}`)
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
-
- // reverse order
- p = testParser(`{$ADDRESS}:{%PORT%}`)
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
-
- // env var in server block body as argument
- p = testParser(":{%PORT%}\ndir1 {$FOOBAR}")
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Keys[0], ":8080"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "foobar"; expected != actual {
- t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
- }
-
- // combined windows env vars in argument
- p = testParser(":{%PORT%}\ndir1 {%ADDRESS%}/{%FOOBAR%}")
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "servername.com/foobar"; expected != actual {
- t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
- }
-
- // malformed env var (windows)
- p = testParser(":1234\ndir1 {%ADDRESS}")
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "{%ADDRESS}"; expected != actual {
- t.Errorf("Expected host to be '%s' but was '%s'", expected, actual)
- }
-
- // malformed (non-existent) env var (unix)
- p = testParser(`:{$PORT$}`)
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Keys[0], ":"; expected != actual {
- t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
- }
-
- // in quoted field
- p = testParser(":1234\ndir1 \"Test {$FOOBAR} test\"")
- blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "Test foobar test"; expected != actual {
- t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
- }
-}
-
-func testParser(input string) parser {
- buf := strings.NewReader(input)
- p := parser{Dispenser: NewDispenser("Caddyfile", buf)}
- return p
-}
diff --git a/caddyhttp/basicauth/basicauth.go b/caddyhttp/basicauth/basicauth.go
deleted file mode 100644
index 5661a017d68..00000000000
--- a/caddyhttp/basicauth/basicauth.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Package basicauth implements HTTP Basic Authentication for Caddy.
-//
-// This is useful for simple protections on a website, like requiring
-// a password to access an admin interface. This package assumes a
-// fairly small threat model.
-package basicauth
-
-import (
- "bufio"
- "context"
- "crypto/sha1"
- "crypto/subtle"
- "fmt"
- "io"
- "net/http"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- "github.com/jimstudt/http-authentication/basic"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// BasicAuth is middleware to protect resources with a username and password.
-// Note that HTTP Basic Authentication is not secure by itself and should
-// not be used to protect important assets without HTTPS. Even then, the
-// security of HTTP Basic Auth is disputed. Use discretion when deciding
-// what to protect with BasicAuth.
-type BasicAuth struct {
- Next httpserver.Handler
- SiteRoot string
- Rules []Rule
-}
-
-// ServeHTTP implements the httpserver.Handler interface.
-func (a BasicAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- var protected, isAuthenticated bool
- var realm string
-
- for _, rule := range a.Rules {
- for _, res := range rule.Resources {
- if !httpserver.Path(r.URL.Path).Matches(res) {
- continue
- }
-
- // path matches; this endpoint is protected
- protected = true
- realm = rule.Realm
-
- // parse auth header
- username, password, ok := r.BasicAuth()
-
- // check credentials
- if !ok ||
- username != rule.Username ||
- !rule.Password(password) {
- continue
- }
-
- // by this point, authentication was successful
- isAuthenticated = true
-
- // let upstream middleware (e.g. fastcgi and cgi) know about authenticated
- // user; this replaces the request with a wrapped instance
- r = r.WithContext(context.WithValue(r.Context(),
- httpserver.RemoteUserCtxKey, username))
- }
- }
-
- if protected && !isAuthenticated {
- // browsers show a message that says something like:
- // "The website says: "
- // which is kinda dumb, but whatever.
- if realm == "" {
- realm = "Restricted"
- }
- w.Header().Set("WWW-Authenticate", "Basic realm=\""+realm+"\"")
- return http.StatusUnauthorized, nil
- }
-
- // Pass-through when no paths match
- return a.Next.ServeHTTP(w, r)
-}
-
-// Rule represents a BasicAuth rule. A username and password
-// combination protect the associated resources, which are
-// file or directory paths.
-type Rule struct {
- Username string
- Password func(string) bool
- Resources []string
- Realm string // See RFC 1945 and RFC 2617, default: "Restricted"
-}
-
-// PasswordMatcher determines whether a password matches a rule.
-type PasswordMatcher func(pw string) bool
-
-var (
- htpasswords map[string]map[string]PasswordMatcher
- htpasswordsMu sync.Mutex
-)
-
-// GetHtpasswdMatcher matches password rules.
-func GetHtpasswdMatcher(filename, username, siteRoot string) (PasswordMatcher, error) {
- filename = filepath.Join(siteRoot, filename)
- htpasswordsMu.Lock()
- if htpasswords == nil {
- htpasswords = make(map[string]map[string]PasswordMatcher)
- }
- pm := htpasswords[filename]
- if pm == nil {
- fh, err := os.Open(filename)
- if err != nil {
- return nil, fmt.Errorf("open %q: %v", filename, err)
- }
- defer fh.Close()
- pm = make(map[string]PasswordMatcher)
- if err = parseHtpasswd(pm, fh); err != nil {
- return nil, fmt.Errorf("parsing htpasswd %q: %v", fh.Name(), err)
- }
- htpasswords[filename] = pm
- }
- htpasswordsMu.Unlock()
- if pm[username] == nil {
- return nil, fmt.Errorf("username %q not found in %q", username, filename)
- }
- return pm[username], nil
-}
-
-func parseHtpasswd(pm map[string]PasswordMatcher, r io.Reader) error {
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if line == "" || strings.IndexByte(line, '#') == 0 {
- continue
- }
- i := strings.IndexByte(line, ':')
- if i <= 0 {
- return fmt.Errorf("malformed line, no color: %q", line)
- }
- user, encoded := line[:i], line[i+1:]
- for _, p := range basic.DefaultSystems {
- matcher, err := p(encoded)
- if err != nil {
- return err
- }
- if matcher != nil {
- pm[user] = matcher.MatchesPassword
- break
- }
- }
- }
- return scanner.Err()
-}
-
-// PlainMatcher returns a PasswordMatcher that does a constant-time
-// byte comparison against the password passw.
-func PlainMatcher(passw string) PasswordMatcher {
- // compare hashes of equal length instead of actual password
- // to avoid leaking password length
- passwHash := sha1.New()
- passwHash.Write([]byte(passw))
- passwSum := passwHash.Sum(nil)
- return func(pw string) bool {
- pwHash := sha1.New()
- pwHash.Write([]byte(pw))
- pwSum := pwHash.Sum(nil)
- return subtle.ConstantTimeCompare([]byte(pwSum), []byte(passwSum)) == 1
- }
-}
diff --git a/caddyhttp/basicauth/basicauth_test.go b/caddyhttp/basicauth/basicauth_test.go
deleted file mode 100644
index 3f0113b2e15..00000000000
--- a/caddyhttp/basicauth/basicauth_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package basicauth
-
-import (
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestBasicAuth(t *testing.T) {
- var i int
- // This handler is registered for tests in which the only authorized user is
- // "okuser"
- upstreamHandler := func(w http.ResponseWriter, r *http.Request) (int, error) {
- remoteUser, _ := r.Context().Value(httpserver.RemoteUserCtxKey).(string)
- if remoteUser != "okuser" {
- t.Errorf("Test %d: expecting remote user 'okuser', got '%s'", i, remoteUser)
- }
- return http.StatusOK, nil
- }
- rws := []BasicAuth{
- {
- Next: httpserver.HandlerFunc(upstreamHandler),
- Rules: []Rule{
- {Username: "okuser", Password: PlainMatcher("okpass"),
- Resources: []string{"/testing"}, Realm: "Resources"},
- },
- },
- {
- Next: httpserver.HandlerFunc(upstreamHandler),
- Rules: []Rule{
- {Username: "okuser", Password: PlainMatcher("okpass"),
- Resources: []string{"/testing"}},
- },
- },
- }
-
- type testType struct {
- from string
- result int
- user string
- password string
- }
-
- tests := []testType{
- {"/testing", http.StatusOK, "okuser", "okpass"},
- {"/testing", http.StatusUnauthorized, "baduser", "okpass"},
- {"/testing", http.StatusUnauthorized, "okuser", "badpass"},
- {"/testing", http.StatusUnauthorized, "OKuser", "okpass"},
- {"/testing", http.StatusUnauthorized, "OKuser", "badPASS"},
- {"/testing", http.StatusUnauthorized, "", "okpass"},
- {"/testing", http.StatusUnauthorized, "okuser", ""},
- {"/testing", http.StatusUnauthorized, "", ""},
- }
-
- var test testType
- for _, rw := range rws {
- expectRealm := rw.Rules[0].Realm
- if expectRealm == "" {
- expectRealm = "Restricted" // Default if Realm not specified in rule
- }
- for i, test = range tests {
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request: %v", i, err)
- }
- req.SetBasicAuth(test.user, test.password)
-
- rec := httptest.NewRecorder()
- result, err := rw.ServeHTTP(rec, req)
- if err != nil {
- t.Fatalf("Test %d: Could not ServeHTTP: %v", i, err)
- }
- if result != test.result {
- t.Errorf("Test %d: Expected status code %d but was %d",
- i, test.result, result)
- }
- if test.result == http.StatusUnauthorized {
- headers := rec.Header()
- if val, ok := headers["Www-Authenticate"]; ok {
- if got, want := val[0], "Basic realm=\""+expectRealm+"\""; got != want {
- t.Errorf("Test %d: Www-Authenticate header should be '%s', got: '%s'", i, want, got)
- }
- } else {
- t.Errorf("Test %d: response should have a 'Www-Authenticate' header", i)
- }
- } else {
- if req.Header.Get("Authorization") == "" {
- // see issue #1508: https://github.com/mholt/caddy/issues/1508
- t.Errorf("Test %d: Expected Authorization header to be retained after successful auth, but was empty", i)
- }
- }
- }
- }
-}
-
-func TestMultipleOverlappingRules(t *testing.T) {
- rw := BasicAuth{
- Next: httpserver.HandlerFunc(contentHandler),
- Rules: []Rule{
- {Username: "t", Password: PlainMatcher("p1"), Resources: []string{"/t"}},
- {Username: "t1", Password: PlainMatcher("p2"), Resources: []string{"/t/t"}},
- },
- }
-
- tests := []struct {
- from string
- result int
- cred string
- }{
- {"/t", http.StatusOK, "t:p1"},
- {"/t/t", http.StatusOK, "t:p1"},
- {"/t/t", http.StatusOK, "t1:p2"},
- {"/a", http.StatusOK, "t1:p2"},
- {"/t/t", http.StatusUnauthorized, "t1:p3"},
- {"/t", http.StatusUnauthorized, "t1:p2"},
- }
-
- for i, test := range tests {
-
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request %v", i, err)
- }
- auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(test.cred))
- req.Header.Set("Authorization", auth)
-
- rec := httptest.NewRecorder()
- result, err := rw.ServeHTTP(rec, req)
- if err != nil {
- t.Fatalf("Test %d: Could not ServeHTTP %v", i, err)
- }
- if result != test.result {
- t.Errorf("Test %d: Expected Header '%d' but was '%d'",
- i, test.result, result)
- }
-
- }
-
-}
-
-func contentHandler(w http.ResponseWriter, r *http.Request) (int, error) {
- fmt.Fprintf(w, r.URL.String())
- return http.StatusOK, nil
-}
-
-func TestHtpasswd(t *testing.T) {
- htpasswdPasswd := "IedFOuGmTpT8"
- htpasswdFile := `sha1:{SHA}dcAUljwz99qFjYR0YLTXx0RqLww=
-md5:$apr1$l42y8rex$pOA2VJ0x/0TwaFeAF9nX61`
-
- htfh, err := ioutil.TempFile("", "basicauth-")
- if err != nil {
- t.Skipf("Error creating temp file (%v), will skip htpassword test")
- return
- }
- defer os.Remove(htfh.Name())
- if _, err = htfh.Write([]byte(htpasswdFile)); err != nil {
- t.Fatalf("write htpasswd file %q: %v", htfh.Name(), err)
- }
- htfh.Close()
-
- for i, username := range []string{"sha1", "md5"} {
- rule := Rule{Username: username, Resources: []string{"/testing"}}
-
- siteRoot := filepath.Dir(htfh.Name())
- filename := filepath.Base(htfh.Name())
- if rule.Password, err = GetHtpasswdMatcher(filename, rule.Username, siteRoot); err != nil {
- t.Fatalf("GetHtpasswdMatcher(%q, %q): %v", htfh.Name(), rule.Username, err)
- }
- t.Logf("%d. username=%q", i, rule.Username)
- if !rule.Password(htpasswdPasswd) || rule.Password(htpasswdPasswd+"!") {
- t.Errorf("%d (%s) password does not match.", i, rule.Username)
- }
- }
-}
diff --git a/caddyhttp/basicauth/setup.go b/caddyhttp/basicauth/setup.go
deleted file mode 100644
index 9fa6ddd6003..00000000000
--- a/caddyhttp/basicauth/setup.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package basicauth
-
-import (
- "strings"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("basicauth", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new BasicAuth middleware instance.
-func setup(c *caddy.Controller) error {
- cfg := httpserver.GetConfig(c)
- root := cfg.Root
-
- rules, err := basicAuthParse(c)
- if err != nil {
- return err
- }
-
- basic := BasicAuth{Rules: rules}
-
- cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- basic.Next = next
- basic.SiteRoot = root
- return basic
- })
-
- return nil
-}
-
-func basicAuthParse(c *caddy.Controller) ([]Rule, error) {
- var rules []Rule
- cfg := httpserver.GetConfig(c)
-
- var err error
- for c.Next() {
- var rule Rule
-
- args := c.RemainingArgs()
-
- switch len(args) {
- case 2:
- rule.Username = args[0]
- if rule.Password, err = passwordMatcher(rule.Username, args[1], cfg.Root); err != nil {
- return rules, c.Errf("Get password matcher from %s: %v", c.Val(), err)
- }
- case 3:
- rule.Resources = append(rule.Resources, args[0])
- rule.Username = args[1]
- if rule.Password, err = passwordMatcher(rule.Username, args[2], cfg.Root); err != nil {
- return rules, c.Errf("Get password matcher from %s: %v", c.Val(), err)
- }
- default:
- return rules, c.ArgErr()
- }
-
- // If nested block is present, process it here
- for c.NextBlock() {
- val := c.Val()
- args = c.RemainingArgs()
- switch len(args) {
- case 0:
- // Assume single argument is path resource
- rule.Resources = append(rule.Resources, val)
- case 1:
- if val == "realm" {
- if rule.Realm == "" {
- rule.Realm = strings.Replace(args[0], `"`, `\"`, -1)
- } else {
- return rules, c.Errf("\"realm\" subdirective can only be specified once")
- }
- } else {
- return rules, c.Errf("expecting \"realm\", got \"%s\"", val)
- }
- default:
- return rules, c.ArgErr()
- }
- }
-
- rules = append(rules, rule)
- }
-
- return rules, nil
-}
-
-func passwordMatcher(username, passw, siteRoot string) (PasswordMatcher, error) {
- htpasswdPrefix := "htpasswd="
- if !strings.HasPrefix(passw, htpasswdPrefix) {
- return PlainMatcher(passw), nil
- }
- return GetHtpasswdMatcher(passw[len(htpasswdPrefix):], username, siteRoot)
-}
diff --git a/caddyhttp/basicauth/setup_test.go b/caddyhttp/basicauth/setup_test.go
deleted file mode 100644
index 1075b2bc0a3..00000000000
--- a/caddyhttp/basicauth/setup_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package basicauth
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `basicauth user pwd`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(BasicAuth)
- if !ok {
- t.Fatalf("Expected handler to be type BasicAuth, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-}
-
-func TestBasicAuthParse(t *testing.T) {
- htpasswdPasswd := "IedFOuGmTpT8"
- htpasswdFile := `sha1:{SHA}dcAUljwz99qFjYR0YLTXx0RqLww=
-md5:$apr1$l42y8rex$pOA2VJ0x/0TwaFeAF9nX61`
-
- var skipHtpassword bool
- htfh, err := ioutil.TempFile(".", "basicauth-")
- if err != nil {
- t.Logf("Error creating temp file (%v), will skip htpassword test", err)
- skipHtpassword = true
- } else {
- if _, err = htfh.Write([]byte(htpasswdFile)); err != nil {
- t.Fatalf("write htpasswd file %q: %v", htfh.Name(), err)
- }
- htfh.Close()
- defer os.Remove(htfh.Name())
- }
-
- tests := []struct {
- input string
- shouldErr bool
- password string
- expected []Rule
- }{
- {`basicauth user pwd`, false, "pwd", []Rule{
- {Username: "user"},
- }},
- {`basicauth user pwd {
- }`, false, "pwd", []Rule{
- {Username: "user"},
- }},
- {`basicauth /resource1 user pwd {
- }`, false, "pwd", []Rule{
- {Username: "user", Resources: []string{"/resource1"}},
- }},
- {`basicauth /resource1 user pwd {
- realm Resources
- }`, false, "pwd", []Rule{
- {Username: "user", Resources: []string{"/resource1"}, Realm: "Resources"},
- }},
- {`basicauth user pwd {
- /resource1
- /resource2
- }`, false, "pwd", []Rule{
- {Username: "user", Resources: []string{"/resource1", "/resource2"}},
- }},
- {`basicauth user pwd {
- /resource1
- /resource2
- realm "Secure resources"
- }`, false, "pwd", []Rule{
- {Username: "user", Resources: []string{"/resource1", "/resource2"}, Realm: "Secure resources"},
- }},
- {`basicauth user pwd {
- /resource1
- realm "Secure resources"
- realm Extra
- /resource2
- }`, true, "pwd", []Rule{}},
- {`basicauth user pwd {
- /resource1
- foo "Resources"
- /resource2
- }`, true, "pwd", []Rule{}},
- {`basicauth /resource user pwd`, false, "pwd", []Rule{
- {Username: "user", Resources: []string{"/resource"}},
- }},
- {`basicauth /res1 user1 pwd1
- basicauth /res2 user2 pwd2`, false, "pwd", []Rule{
- {Username: "user1", Resources: []string{"/res1"}},
- {Username: "user2", Resources: []string{"/res2"}},
- }},
- {`basicauth user`, true, "", []Rule{}},
- {`basicauth`, true, "", []Rule{}},
- {`basicauth /resource user pwd asdf`, true, "", []Rule{}},
-
- {`basicauth sha1 htpasswd=` + htfh.Name(), false, htpasswdPasswd, []Rule{
- {Username: "sha1"},
- }},
- }
-
- for i, test := range tests {
- actual, err := basicAuthParse(caddy.NewTestController("http", test.input))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
-
- if len(actual) != len(test.expected) {
- t.Fatalf("Test %d expected %d rules, but got %d",
- i, len(test.expected), len(actual))
- }
-
- for j, expectedRule := range test.expected {
- actualRule := actual[j]
-
- if actualRule.Username != expectedRule.Username {
- t.Errorf("Test %d, rule %d: Expected username '%s', got '%s'",
- i, j, expectedRule.Username, actualRule.Username)
- }
-
- if actualRule.Realm != expectedRule.Realm {
- t.Errorf("Test %d, rule %d: Expected realm '%s', got '%s'",
- i, j, expectedRule.Realm, actualRule.Realm)
- }
-
- if strings.Contains(test.input, "htpasswd=") && skipHtpassword {
- continue
- }
- pwd := test.password
- if len(actual) > 1 {
- pwd = fmt.Sprintf("%s%d", pwd, j+1)
- }
- if !actualRule.Password(pwd) || actualRule.Password(test.password+"!") {
- t.Errorf("Test %d, rule %d: Expected password '%v', got '%v'",
- i, j, test.password, actualRule.Password(""))
- }
-
- expectedRes := fmt.Sprintf("%v", expectedRule.Resources)
- actualRes := fmt.Sprintf("%v", actualRule.Resources)
- if actualRes != expectedRes {
- t.Errorf("Test %d, rule %d: Expected resource list %s, but got %s",
- i, j, expectedRes, actualRes)
- }
- }
- }
-}
diff --git a/caddyhttp/bind/bind.go b/caddyhttp/bind/bind.go
deleted file mode 100644
index c69aa0b1dac..00000000000
--- a/caddyhttp/bind/bind.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package bind
-
-import (
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("bind", caddy.Plugin{
- ServerType: "http",
- Action: setupBind,
- })
-}
-
-func setupBind(c *caddy.Controller) error {
- config := httpserver.GetConfig(c)
- for c.Next() {
- if !c.Args(&config.ListenHost) {
- return c.ArgErr()
- }
- config.TLS.ListenHost = config.ListenHost // necessary for ACME challenges, see issue #309
- }
- return nil
-}
diff --git a/caddyhttp/bind/bind_test.go b/caddyhttp/bind/bind_test.go
deleted file mode 100644
index 8d81f84af28..00000000000
--- a/caddyhttp/bind/bind_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package bind
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetupBind(t *testing.T) {
- c := caddy.NewTestController("http", `bind 1.2.3.4`)
- err := setupBind(c)
- if err != nil {
- t.Fatalf("Expected no errors, but got: %v", err)
- }
-
- cfg := httpserver.GetConfig(c)
- if got, want := cfg.ListenHost, "1.2.3.4"; got != want {
- t.Errorf("Expected the config's ListenHost to be %s, was %s", want, got)
- }
- if got, want := cfg.TLS.ListenHost, "1.2.3.4"; got != want {
- t.Errorf("Expected the TLS config's ListenHost to be %s, was %s", want, got)
- }
-}
diff --git a/caddyhttp/browse/browse.go b/caddyhttp/browse/browse.go
deleted file mode 100644
index e1368059c0b..00000000000
--- a/caddyhttp/browse/browse.go
+++ /dev/null
@@ -1,514 +0,0 @@
-// Package browse provides middleware for listing files in a directory
-// when directory path is requested instead of a specific file.
-package browse
-
-import (
- "bytes"
- "encoding/json"
- "net/http"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "text/template"
- "time"
-
- "github.com/dustin/go-humanize"
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/staticfiles"
-)
-
-const (
- sortByName = "name"
- sortByNameDirFirst = "namedirfirst"
- sortBySize = "size"
- sortByTime = "time"
-)
-
-// Browse is an http.Handler that can show a file listing when
-// directories in the given paths are specified.
-type Browse struct {
- Next httpserver.Handler
- Configs []Config
- IgnoreIndexes bool
-}
-
-// Config is a configuration for browsing in a particular path.
-type Config struct {
- PathScope string // the base path the URL must match to enable browsing
- Fs staticfiles.FileServer
- Variables interface{}
- Template *template.Template
-}
-
-// A Listing is the context used to fill out a template.
-type Listing struct {
- // The name of the directory (the last element of the path)
- Name string
-
- // The full path of the request
- Path string
-
- // Whether the parent directory is browsable
- CanGoUp bool
-
- // The items (files and folders) in the path
- Items []FileInfo
-
- // The number of directories in the listing
- NumDirs int
-
- // The number of files (items that aren't directories) in the listing
- NumFiles int
-
- // Which sorting order is used
- Sort string
-
- // And which order
- Order string
-
- // If ≠0 then Items have been limited to that many elements
- ItemsLimitedTo int
-
- // Optional custom variables for use in browse templates
- User interface{}
-
- httpserver.Context
-}
-
-// Crumb represents part of a breadcrumb menu.
-type Crumb struct {
- Link, Text string
-}
-
-// Breadcrumbs returns l.Path where every element maps
-// the link to the text to display.
-func (l Listing) Breadcrumbs() []Crumb {
- var result []Crumb
-
- if len(l.Path) == 0 {
- return result
- }
-
- // skip trailing slash
- lpath := l.Path
- if lpath[len(lpath)-1] == '/' {
- lpath = lpath[:len(lpath)-1]
- }
-
- parts := strings.Split(lpath, "/")
- for i := range parts {
- txt := parts[i]
- if i == 0 && parts[i] == "" {
- txt = "/"
- }
- result = append(result, Crumb{Link: strings.Repeat("../", len(parts)-i-1), Text: txt})
- }
-
- return result
-}
-
-// FileInfo is the info about a particular file or directory
-type FileInfo struct {
- Name string
- Size int64
- URL string
- ModTime time.Time
- Mode os.FileMode
- IsDir bool
- IsSymlink bool
-}
-
-// HumanSize returns the size of the file as a human-readable string
-// in IEC format (i.e. power of 2 or base 1024).
-func (fi FileInfo) HumanSize() string {
- return humanize.IBytes(uint64(fi.Size))
-}
-
-// HumanModTime returns the modified time of the file as a human-readable string.
-func (fi FileInfo) HumanModTime(format string) string {
- return fi.ModTime.Format(format)
-}
-
-// Implement sorting for Listing
-type byName Listing
-type byNameDirFirst Listing
-type bySize Listing
-type byTime Listing
-
-// By Name
-func (l byName) Len() int { return len(l.Items) }
-func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-// Treat upper and lower case equally
-func (l byName) Less(i, j int) bool {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
-}
-
-// By Name Dir First
-func (l byNameDirFirst) Len() int { return len(l.Items) }
-func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-// Treat upper and lower case equally
-func (l byNameDirFirst) Less(i, j int) bool {
-
- // if both are dir or file sort normally
- if l.Items[i].IsDir == l.Items[j].IsDir {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
- }
-
- // always sort dir ahead of file
- return l.Items[i].IsDir
-}
-
-// By Size
-func (l bySize) Len() int { return len(l.Items) }
-func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-
-const directoryOffset = -1 << 31 // = math.MinInt32
-func (l bySize) Less(i, j int) bool {
- iSize, jSize := l.Items[i].Size, l.Items[j].Size
-
- // Directory sizes depend on the filesystem implementation,
- // which is opaque to a visitor, and should indeed does not change if the operator choses to change the fs.
- // For a consistent user experience directories are pulled to the front…
- if l.Items[i].IsDir {
- iSize = directoryOffset
- }
- if l.Items[j].IsDir {
- jSize = directoryOffset
- }
- // … and sorted by name.
- if l.Items[i].IsDir && l.Items[j].IsDir {
- return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
- }
-
- return iSize < jSize
-}
-
-// By Time
-func (l byTime) Len() int { return len(l.Items) }
-func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
-func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) }
-
-// Add sorting method to "Listing"
-// it will apply what's in ".Sort" and ".Order"
-func (l Listing) applySort() {
- // Check '.Order' to know how to sort
- if l.Order == "desc" {
- switch l.Sort {
- case sortByName:
- sort.Sort(sort.Reverse(byName(l)))
- case sortByNameDirFirst:
- sort.Sort(sort.Reverse(byNameDirFirst(l)))
- case sortBySize:
- sort.Sort(sort.Reverse(bySize(l)))
- case sortByTime:
- sort.Sort(sort.Reverse(byTime(l)))
- default:
- // If not one of the above, do nothing
- return
- }
- } else { // If we had more Orderings we could add them here
- switch l.Sort {
- case sortByName:
- sort.Sort(byName(l))
- case sortByNameDirFirst:
- sort.Sort(byNameDirFirst(l))
- case sortBySize:
- sort.Sort(bySize(l))
- case sortByTime:
- sort.Sort(byTime(l))
- default:
- // If not one of the above, do nothing
- return
- }
- }
-}
-
-func directoryListing(files []os.FileInfo, canGoUp bool, urlPath string, config *Config) (Listing, bool) {
- var (
- fileinfos []FileInfo
- dirCount, fileCount int
- hasIndexFile bool
- )
-
- for _, f := range files {
- name := f.Name()
-
- for _, indexName := range staticfiles.IndexPages {
- if name == indexName {
- hasIndexFile = true
- break
- }
- }
-
- isDir := f.IsDir() || isSymlinkTargetDir(f, urlPath, config)
-
- if isDir {
- name += "/"
- dirCount++
- } else {
- fileCount++
- }
-
- if config.Fs.IsHidden(f) {
- continue
- }
-
- url := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name
-
- fileinfos = append(fileinfos, FileInfo{
- IsDir: isDir,
- IsSymlink: isSymlink(f),
- Name: f.Name(),
- Size: f.Size(),
- URL: url.String(),
- ModTime: f.ModTime().UTC(),
- Mode: f.Mode(),
- })
- }
-
- return Listing{
- Name: path.Base(urlPath),
- Path: urlPath,
- CanGoUp: canGoUp,
- Items: fileinfos,
- NumDirs: dirCount,
- NumFiles: fileCount,
- }, hasIndexFile
-}
-
-// isSymlink return true if f is a symbolic link
-func isSymlink(f os.FileInfo) bool {
- return f.Mode()&os.ModeSymlink != 0
-}
-
-// isSymlinkTargetDir return true if f's symbolic link target
-// is a directory. Return false if not a symbolic link.
-func isSymlinkTargetDir(f os.FileInfo, urlPath string, config *Config) bool {
- if !isSymlink(f) {
- return false
- }
-
- // a bit strange but we want Stat thru the jailed filesystem to be safe
- target, err := config.Fs.Root.Open(filepath.Join(urlPath, f.Name()))
- if err != nil {
- return false
- }
- defer target.Close()
- targetInto, err := target.Stat()
- if err != nil {
- return false
- }
-
- return targetInto.IsDir()
-}
-
-// ServeHTTP determines if the request is for this plugin, and if all prerequisites are met.
-// If so, control is handed over to ServeListing.
-func (b Browse) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- // See if there's a browse configuration to match the path
- var bc *Config
- for i := range b.Configs {
- if httpserver.Path(r.URL.Path).Matches(b.Configs[i].PathScope) {
- bc = &b.Configs[i]
- break
- }
- }
- if bc == nil {
- return b.Next.ServeHTTP(w, r)
- }
-
- // Browse works on existing directories; delegate everything else
- requestedFilepath, err := bc.Fs.Root.Open(r.URL.Path)
- if err != nil {
- switch {
- case os.IsPermission(err):
- return http.StatusForbidden, err
- case os.IsExist(err):
- return http.StatusNotFound, err
- default:
- return b.Next.ServeHTTP(w, r)
- }
- }
- defer requestedFilepath.Close()
-
- info, err := requestedFilepath.Stat()
- if err != nil {
- switch {
- case os.IsPermission(err):
- return http.StatusForbidden, err
- case os.IsExist(err):
- return http.StatusGone, err
- default:
- return b.Next.ServeHTTP(w, r)
- }
- }
- if !info.IsDir() {
- return b.Next.ServeHTTP(w, r)
- }
-
- // Do not reply to anything else because it might be nonsensical
- switch r.Method {
- case http.MethodGet, http.MethodHead:
- // proceed, noop
- case "PROPFIND", http.MethodOptions:
- return http.StatusNotImplemented, nil
- default:
- return b.Next.ServeHTTP(w, r)
- }
-
- // Browsing navigation gets messed up if browsing a directory
- // that doesn't end in "/" (which it should, anyway)
- u := *r.URL
- if u.Path == "" {
- u.Path = "/"
- }
- if u.Path[len(u.Path)-1] != '/' {
- u.Path += "/"
- http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
- return http.StatusMovedPermanently, nil
- }
-
- return b.ServeListing(w, r, requestedFilepath, bc)
-}
-
-func (b Browse) loadDirectoryContents(requestedFilepath http.File, urlPath string, config *Config) (*Listing, bool, error) {
- files, err := requestedFilepath.Readdir(-1)
- if err != nil {
- return nil, false, err
- }
-
- // Determine if user can browse up another folder
- var canGoUp bool
- curPathDir := path.Dir(strings.TrimSuffix(urlPath, "/"))
- for _, other := range b.Configs {
- if strings.HasPrefix(curPathDir, other.PathScope) {
- canGoUp = true
- break
- }
- }
-
- // Assemble listing of directory contents
- listing, hasIndex := directoryListing(files, canGoUp, urlPath, config)
-
- return &listing, hasIndex, nil
-}
-
-// handleSortOrder gets and stores for a Listing the 'sort' and 'order',
-// and reads 'limit' if given. The latter is 0 if not given.
-//
-// This sets Cookies.
-func (b Browse) handleSortOrder(w http.ResponseWriter, r *http.Request, scope string) (sort string, order string, limit int, err error) {
- sort, order, limitQuery := r.URL.Query().Get("sort"), r.URL.Query().Get("order"), r.URL.Query().Get("limit")
-
- // If the query 'sort' or 'order' is empty, use defaults or any values previously saved in Cookies
- switch sort {
- case "":
- sort = sortByNameDirFirst
- if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil {
- sort = sortCookie.Value
- }
- case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
- http.SetCookie(w, &http.Cookie{Name: "sort", Value: sort, Path: scope, Secure: r.TLS != nil})
- }
-
- switch order {
- case "":
- order = "asc"
- if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
- order = orderCookie.Value
- }
- case "asc", "desc":
- http.SetCookie(w, &http.Cookie{Name: "order", Value: order, Path: scope, Secure: r.TLS != nil})
- }
-
- if limitQuery != "" {
- limit, err = strconv.Atoi(limitQuery)
- if err != nil { // if the 'limit' query can't be interpreted as a number, return err
- return
- }
- }
-
- return
-}
-
-// ServeListing returns a formatted view of 'requestedFilepath' contents'.
-func (b Browse) ServeListing(w http.ResponseWriter, r *http.Request, requestedFilepath http.File, bc *Config) (int, error) {
- listing, containsIndex, err := b.loadDirectoryContents(requestedFilepath, r.URL.Path, bc)
- if err != nil {
- switch {
- case os.IsPermission(err):
- return http.StatusForbidden, err
- case os.IsExist(err):
- return http.StatusGone, err
- default:
- return http.StatusInternalServerError, err
- }
- }
- if containsIndex && !b.IgnoreIndexes { // directory isn't browsable
- return b.Next.ServeHTTP(w, r)
- }
- listing.Context = httpserver.Context{
- Root: bc.Fs.Root,
- Req: r,
- URL: r.URL,
- }
- listing.User = bc.Variables
-
- // Copy the query values into the Listing struct
- var limit int
- listing.Sort, listing.Order, limit, err = b.handleSortOrder(w, r, bc.PathScope)
- if err != nil {
- return http.StatusBadRequest, err
- }
-
- listing.applySort()
-
- if limit > 0 && limit <= len(listing.Items) {
- listing.Items = listing.Items[:limit]
- listing.ItemsLimitedTo = limit
- }
-
- var buf *bytes.Buffer
- acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
- switch {
- case strings.Contains(acceptHeader, "application/json"):
- if buf, err = b.formatAsJSON(listing, bc); err != nil {
- return http.StatusInternalServerError, err
- }
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
-
- default: // There's no 'application/json' in the 'Accept' header; browse normally
- if buf, err = b.formatAsHTML(listing, bc); err != nil {
- return http.StatusInternalServerError, err
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
-
- }
-
- buf.WriteTo(w)
-
- return http.StatusOK, nil
-}
-
-func (b Browse) formatAsJSON(listing *Listing, bc *Config) (*bytes.Buffer, error) {
- marsh, err := json.Marshal(listing.Items)
- if err != nil {
- return nil, err
- }
-
- buf := new(bytes.Buffer)
- _, err = buf.Write(marsh)
- return buf, err
-}
-
-func (b Browse) formatAsHTML(listing *Listing, bc *Config) (*bytes.Buffer, error) {
- buf := new(bytes.Buffer)
- err := bc.Template.Execute(buf, listing)
- return buf, err
-}
diff --git a/caddyhttp/browse/browse_test.go b/caddyhttp/browse/browse_test.go
deleted file mode 100644
index 1e666021c68..00000000000
--- a/caddyhttp/browse/browse_test.go
+++ /dev/null
@@ -1,602 +0,0 @@
-package browse
-
-import (
- "context"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "testing"
- "text/template"
- "time"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/staticfiles"
-)
-
-const testDirPrefix = "caddy_browse_test"
-
-func TestSort(t *testing.T) {
- // making up []fileInfo with bogus values;
- // to be used to make up our "listing"
- fileInfos := []FileInfo{
- {
- Name: "fizz",
- Size: 4,
- ModTime: time.Now().AddDate(-1, 1, 0),
- },
- {
- Name: "buzz",
- Size: 2,
- ModTime: time.Now().AddDate(0, -3, 3),
- },
- {
- Name: "bazz",
- Size: 1,
- ModTime: time.Now().AddDate(0, -2, -23),
- },
- {
- Name: "jazz",
- Size: 3,
- ModTime: time.Now(),
- },
- }
- listing := Listing{
- Name: "foobar",
- Path: "/fizz/buzz",
- CanGoUp: false,
- Items: fileInfos,
- }
-
- // sort by name
- listing.Sort = "name"
- listing.applySort()
- if !sort.IsSorted(byName(listing)) {
- t.Errorf("The listing isn't name sorted: %v", listing.Items)
- }
-
- // sort by size
- listing.Sort = "size"
- listing.applySort()
- if !sort.IsSorted(bySize(listing)) {
- t.Errorf("The listing isn't size sorted: %v", listing.Items)
- }
-
- // sort by Time
- listing.Sort = "time"
- listing.applySort()
- if !sort.IsSorted(byTime(listing)) {
- t.Errorf("The listing isn't time sorted: %v", listing.Items)
- }
-
- // sort by name dir first
- listing.Sort = "namedirfirst"
- listing.applySort()
- if !sort.IsSorted(byNameDirFirst(listing)) {
- t.Errorf("The listing isn't namedirfirst sorted: %v", listing.Items)
- }
-
- // reverse by name
- listing.Sort = "name"
- listing.Order = "desc"
- listing.applySort()
- if !isReversed(byName(listing)) {
- t.Errorf("The listing isn't reversed by name: %v", listing.Items)
- }
-
- // reverse by size
- listing.Sort = "size"
- listing.Order = "desc"
- listing.applySort()
- if !isReversed(bySize(listing)) {
- t.Errorf("The listing isn't reversed by size: %v", listing.Items)
- }
-
- // reverse by time
- listing.Sort = "time"
- listing.Order = "desc"
- listing.applySort()
- if !isReversed(byTime(listing)) {
- t.Errorf("The listing isn't reversed by time: %v", listing.Items)
- }
-
- // reverse by name dir first
- listing.Sort = "namedirfirst"
- listing.Order = "desc"
- listing.applySort()
- if !isReversed(byNameDirFirst(listing)) {
- t.Errorf("The listing isn't reversed by namedirfirst: %v", listing.Items)
- }
-}
-
-func TestBrowseHTTPMethods(t *testing.T) {
- tmpl, err := template.ParseFiles("testdata/photos.tpl")
- if err != nil {
- t.Fatalf("An error occurred while parsing the template: %v", err)
- }
-
- b := Browse{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return http.StatusTeapot, nil // not t.Fatalf, or we will not see what other methods yield
- }),
- Configs: []Config{
- {
- PathScope: "/photos",
- Fs: staticfiles.FileServer{
- Root: http.Dir("./testdata"),
- },
- Template: tmpl,
- },
- },
- }
-
- rec := httptest.NewRecorder()
- for method, expected := range map[string]int{
- http.MethodGet: http.StatusOK,
- http.MethodHead: http.StatusOK,
- http.MethodOptions: http.StatusNotImplemented,
- "PROPFIND": http.StatusNotImplemented,
- } {
- req, err := http.NewRequest(method, "/photos/", nil)
- if err != nil {
- t.Fatalf("Test: Could not create HTTP request: %v", err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- code, _ := b.ServeHTTP(rec, req)
- if code != expected {
- t.Errorf("Wrong status with HTTP Method %s: expected %d, got %d", method, expected, code)
- }
- }
-}
-
-func TestBrowseTemplate(t *testing.T) {
- tmpl, err := template.ParseFiles("testdata/photos.tpl")
- if err != nil {
- t.Fatalf("An error occurred while parsing the template: %v", err)
- }
-
- b := Browse{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- t.Fatalf("Next shouldn't be called")
- return 0, nil
- }),
- Configs: []Config{
- {
- PathScope: "/photos",
- Fs: staticfiles.FileServer{
- Root: http.Dir("./testdata"),
- Hide: []string{"photos/hidden.html"},
- },
- Template: tmpl,
- },
- },
- }
-
- req, err := http.NewRequest("GET", "/photos/", nil)
- if err != nil {
- t.Fatalf("Test: Could not create HTTP request: %v", err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- rec := httptest.NewRecorder()
-
- code, _ := b.ServeHTTP(rec, req)
- if code != http.StatusOK {
- t.Fatalf("Wrong status, expected %d, got %d", http.StatusOK, code)
- }
-
- respBody := rec.Body.String()
- expectedBody := `
-
-
-Template
-
-
-
Header
-
-
/photos/
-
-test1
-
-test.html
-
-test2.html
-
-test3.html
-
-
-
-`
-
- if respBody != expectedBody {
- t.Fatalf("Expected body: '%v' got: '%v'", expectedBody, respBody)
- }
-
-}
-
-func TestBrowseJson(t *testing.T) {
- b := Browse{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- t.Fatalf("Next shouldn't be called")
- return 0, nil
- }),
- Configs: []Config{
- {
- PathScope: "/photos/",
- Fs: staticfiles.FileServer{
- Root: http.Dir("./testdata"),
- },
- },
- },
- }
-
- //Getting the listing from the ./testdata/photos, the listing returned will be used to validate test results
- testDataPath := filepath.Join("./testdata", "photos")
- file, err := os.Open(testDataPath)
- if err != nil {
- if os.IsPermission(err) {
- t.Fatalf("Os Permission Error")
- }
- }
- defer file.Close()
-
- files, err := file.Readdir(-1)
- if err != nil {
- t.Fatalf("Unable to Read Contents of the directory")
- }
- var fileinfos []FileInfo
-
- for i, f := range files {
- name := f.Name()
-
- // Tests fail in CI environment because all file mod times are the same for
- // some reason, making the sorting unpredictable. To hack around this,
- // we ensure here that each file has a different mod time.
- chTime := f.ModTime().UTC().Add(-(time.Duration(i) * time.Second))
- if err := os.Chtimes(filepath.Join(testDataPath, name), chTime, chTime); err != nil {
- t.Fatal(err)
- }
-
- if f.IsDir() {
- name += "/"
- }
-
- url := url.URL{Path: "./" + name}
-
- fileinfos = append(fileinfos, FileInfo{
- IsDir: f.IsDir(),
- Name: f.Name(),
- Size: f.Size(),
- URL: url.String(),
- ModTime: chTime,
- Mode: f.Mode(),
- })
- }
-
- // Test that sort=name returns correct listing.
-
- listing := Listing{Items: fileinfos} // this listing will be used for validation inside the tests
-
- tests := []struct {
- QueryURL string
- SortBy string
- OrderBy string
- Limit int
- shouldErr bool
- expectedResult []FileInfo
- }{
- //test case 1: testing for default sort and order and without the limit parameter, default sort is by name and the default order is ascending
- //without the limit query entire listing will be produced
- {"/?sort=name", "", "", -1, false, listing.Items},
- //test case 2: limit is set to 1, orderBy and sortBy is default
- {"/?limit=1&sort=name", "", "", 1, false, listing.Items[:1]},
- //test case 3 : if the listing request is bigger than total size of listing then it should return everything
- {"/?limit=100000000&sort=name", "", "", 100000000, false, listing.Items},
- //test case 4 : testing for negative limit
- {"/?limit=-1&sort=name", "", "", -1, false, listing.Items},
- //test case 5 : testing with limit set to -1 and order set to descending
- {"/?limit=-1&order=desc&sort=name", "", "desc", -1, false, listing.Items},
- //test case 6 : testing with limit set to 2 and order set to descending
- {"/?limit=2&order=desc&sort=name", "", "desc", 2, false, listing.Items},
- //test case 7 : testing with limit set to 3 and order set to descending
- {"/?limit=3&order=desc&sort=name", "", "desc", 3, false, listing.Items},
- //test case 8 : testing with limit set to 3 and order set to ascending
- {"/?limit=3&order=asc&sort=name", "", "asc", 3, false, listing.Items},
- //test case 9 : testing with limit set to 1111111 and order set to ascending
- {"/?limit=1111111&order=asc&sort=name", "", "asc", 1111111, false, listing.Items},
- //test case 10 : testing with limit set to default and order set to ascending and sorting by size
- {"/?order=asc&sort=size&sort=name", "size", "asc", -1, false, listing.Items},
- //test case 11 : testing with limit set to default and order set to ascending and sorting by last modified
- {"/?order=asc&sort=time&sort=name", "time", "asc", -1, false, listing.Items},
- //test case 12 : testing with limit set to 1 and order set to ascending and sorting by last modified
- {"/?order=asc&sort=time&limit=1&sort=name", "time", "asc", 1, false, listing.Items},
- //test case 13 : testing with limit set to -100 and order set to ascending and sorting by last modified
- {"/?order=asc&sort=time&limit=-100&sort=name", "time", "asc", -100, false, listing.Items},
- //test case 14 : testing with limit set to -100 and order set to ascending and sorting by size
- {"/?order=asc&sort=size&limit=-100&sort=name", "size", "asc", -100, false, listing.Items},
- }
-
- for i, test := range tests {
- var marsh []byte
- req, err := http.NewRequest("GET", "/photos"+test.QueryURL, nil)
- if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored when making request, but it shouldn't have; got '%v'", i, err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- req.Header.Set("Accept", "application/json")
- rec := httptest.NewRecorder()
-
- code, err := b.ServeHTTP(rec, req)
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
- if code != http.StatusOK {
- t.Fatalf("In test %d: Wrong status, expected %d, got %d", i, http.StatusOK, code)
- }
- if rec.HeaderMap.Get("Content-Type") != "application/json; charset=utf-8" {
- t.Fatalf("Expected Content type to be application/json; charset=utf-8, but got %s ", rec.HeaderMap.Get("Content-Type"))
- }
-
- actualJSONResponse := rec.Body.String()
- copyOflisting := listing
- if test.SortBy == "" {
- copyOflisting.Sort = "name"
- } else {
- copyOflisting.Sort = test.SortBy
- }
- if test.OrderBy == "" {
- copyOflisting.Order = "asc"
- } else {
- copyOflisting.Order = test.OrderBy
- }
-
- copyOflisting.applySort()
-
- limit := test.Limit
- if limit <= len(copyOflisting.Items) && limit > 0 {
- marsh, err = json.Marshal(copyOflisting.Items[:limit])
- } else { // if the 'limit' query is empty, or has the wrong value, list everything
- marsh, err = json.Marshal(copyOflisting.Items)
- }
-
- if err != nil {
- t.Fatalf("Unable to Marshal the listing ")
- }
- expectedJSON := string(marsh)
-
- if actualJSONResponse != expectedJSON {
- t.Errorf("JSON response doesn't match the expected for test number %d with sort=%s, order=%s\nExpected response %s\nActual response = %s\n",
- i+1, test.SortBy, test.OrderBy, expectedJSON, actualJSONResponse)
- }
- }
-}
-
-// "sort" package has "IsSorted" function, but no "IsReversed";
-func isReversed(data sort.Interface) bool {
- n := data.Len()
- for i := n - 1; i > 0; i-- {
- if !data.Less(i, i-1) {
- return false
- }
- }
- return true
-}
-
-func TestBrowseRedirect(t *testing.T) {
- testCases := []struct {
- url string
- statusCode int
- returnCode int
- location string
- }{
- {
- "http://www.example.com/photos",
- http.StatusMovedPermanently,
- http.StatusMovedPermanently,
- "http://www.example.com/photos/",
- },
- {
- "/photos",
- http.StatusMovedPermanently,
- http.StatusMovedPermanently,
- "/photos/",
- },
- }
-
- for i, tc := range testCases {
- b := Browse{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- t.Fatalf("Test %d - Next shouldn't be called", i)
- return 0, nil
- }),
- Configs: []Config{
- {
- PathScope: "/photos",
- Fs: staticfiles.FileServer{
- Root: http.Dir("./testdata"),
- },
- },
- },
- }
-
- req, err := http.NewRequest("GET", tc.url, nil)
- if err != nil {
- t.Fatalf("Test %d - could not create HTTP request: %v", i, err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- rec := httptest.NewRecorder()
-
- returnCode, _ := b.ServeHTTP(rec, req)
- if returnCode != tc.returnCode {
- t.Fatalf("Test %d - wrong return code, expected %d, got %d",
- i, tc.returnCode, returnCode)
- }
-
- if got := rec.Code; got != tc.statusCode {
- t.Errorf("Test %d - wrong status, expected %d, got %d",
- i, tc.statusCode, got)
- }
-
- if got := rec.Header().Get("Location"); got != tc.location {
- t.Errorf("Test %d - wrong Location header, expected %s, got %s",
- i, tc.location, got)
- }
- }
-}
-
-func TestDirSymlink(t *testing.T) {
- testCases := []struct {
- source string
- target string
- pathScope string
- url string
- expectedName string
- expectedURL string
- }{
- // test case can expect a directory "dir" and a symlink to it called "symlink"
-
- {"dir", "$TMP/rel_symlink_to_dir", "/", "/",
- "rel_symlink_to_dir", "./rel_symlink_to_dir/"},
- {"$TMP/dir", "$TMP/abs_symlink_to_dir", "/", "/",
- "abs_symlink_to_dir", "./abs_symlink_to_dir/"},
-
- {"../../dir", "$TMP/sub/dir/rel_symlink_to_dir", "/", "/sub/dir/",
- "rel_symlink_to_dir", "./rel_symlink_to_dir/"},
- {"$TMP/dir", "$TMP/sub/dir/abs_symlink_to_dir", "/", "/sub/dir/",
- "abs_symlink_to_dir", "./abs_symlink_to_dir/"},
-
- {"../../dir", "$TMP/with/scope/rel_symlink_to_dir", "/with/scope", "/with/scope/",
- "rel_symlink_to_dir", "./rel_symlink_to_dir/"},
- {"$TMP/dir", "$TMP/with/scope/abs_symlink_to_dir", "/with/scope", "/with/scope/",
- "abs_symlink_to_dir", "./abs_symlink_to_dir/"},
-
- {"../../../../dir", "$TMP/with/scope/sub/dir/rel_symlink_to_dir", "/with/scope", "/with/scope/sub/dir/",
- "rel_symlink_to_dir", "./rel_symlink_to_dir/"},
- {"$TMP/dir", "$TMP/with/scope/sub/dir/abs_symlink_to_dir", "/with/scope", "/with/scope/sub/dir/",
- "abs_symlink_to_dir", "./abs_symlink_to_dir/"},
-
- {"symlink", "$TMP/rel_symlink_to_symlink", "/", "/",
- "rel_symlink_to_symlink", "./rel_symlink_to_symlink/"},
- {"$TMP/symlink", "$TMP/abs_symlink_to_symlink", "/", "/",
- "abs_symlink_to_symlink", "./abs_symlink_to_symlink/"},
-
- {"../../symlink", "$TMP/sub/dir/rel_symlink_to_symlink", "/", "/sub/dir/",
- "rel_symlink_to_symlink", "./rel_symlink_to_symlink/"},
- {"$TMP/symlink", "$TMP/sub/dir/abs_symlink_to_symlink", "/", "/sub/dir/",
- "abs_symlink_to_symlink", "./abs_symlink_to_symlink/"},
-
- {"../../symlink", "$TMP/with/scope/rel_symlink_to_symlink", "/with/scope", "/with/scope/",
- "rel_symlink_to_symlink", "./rel_symlink_to_symlink/"},
- {"$TMP/symlink", "$TMP/with/scope/abs_symlink_to_symlink", "/with/scope", "/with/scope/",
- "abs_symlink_to_symlink", "./abs_symlink_to_symlink/"},
-
- {"../../../../symlink", "$TMP/with/scope/sub/dir/rel_symlink_to_symlink", "/with/scope", "/with/scope/sub/dir/",
- "rel_symlink_to_symlink", "./rel_symlink_to_symlink/"},
- {"$TMP/symlink", "$TMP/with/scope/sub/dir/abs_symlink_to_symlink", "/with/scope", "/with/scope/sub/dir/",
- "abs_symlink_to_symlink", "./abs_symlink_to_symlink/"},
- }
-
- for i, tc := range testCases {
- func() {
- tmpdir, err := ioutil.TempDir("", testDirPrefix)
- if err != nil {
- t.Fatalf("failed to create test directory: %v", err)
- }
- defer os.RemoveAll(tmpdir)
-
- if err := os.MkdirAll(filepath.Join(tmpdir, "dir"), 0755); err != nil {
- t.Fatalf("failed to create test dir 'dir': %v", err)
- }
- if err := os.Symlink("dir", filepath.Join(tmpdir, "symlink")); err != nil {
- t.Fatalf("failed to create test symlink 'symlink': %v", err)
- }
-
- sourceResolved := strings.Replace(tc.source, "$TMP", tmpdir, -1)
- targetResolved := strings.Replace(tc.target, "$TMP", tmpdir, -1)
-
- if err := os.MkdirAll(filepath.Dir(sourceResolved), 0755); err != nil {
- t.Fatalf("failed to create source symlink dir: %v", err)
- }
- if err := os.MkdirAll(filepath.Dir(targetResolved), 0755); err != nil {
- t.Fatalf("failed to create target symlink dir: %v", err)
- }
- if err := os.Symlink(sourceResolved, targetResolved); err != nil {
- t.Fatalf("failed to create test symlink: %v", err)
- }
-
- b := Browse{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- t.Fatalf("Test %d - Next shouldn't be called", i)
- return 0, nil
- }),
- Configs: []Config{
- {
- PathScope: tc.pathScope,
- Fs: staticfiles.FileServer{
- Root: http.Dir(tmpdir),
- },
- },
- },
- }
-
- req, err := http.NewRequest("GET", tc.url, nil)
- req.Header.Add("Accept", "application/json")
- if err != nil {
- t.Fatalf("Test %d - could not create HTTP request: %v", i, err)
- }
-
- rec := httptest.NewRecorder()
-
- returnCode, _ := b.ServeHTTP(rec, req)
- if returnCode != http.StatusOK {
- t.Fatalf("Test %d - wrong return code, expected %d, got %d",
- i, http.StatusOK, returnCode)
- }
-
- type jsonEntry struct {
- Name string
- IsDir bool
- IsSymlink bool
- URL string
- }
- var entries []jsonEntry
- if err := json.Unmarshal(rec.Body.Bytes(), &entries); err != nil {
- t.Fatalf("Test %d - failed to parse json: %v", i, err)
- }
-
- found := false
- for _, e := range entries {
- if e.Name != tc.expectedName {
- continue
- }
- found = true
- if !e.IsDir {
- t.Fatalf("Test %d - expected to be a dir, got %v", i, e.IsDir)
- }
- if !e.IsSymlink {
- t.Fatalf("Test %d - expected to be a symlink, got %v", i, e.IsSymlink)
- }
- if e.URL != tc.expectedURL {
- t.Fatalf("Test %d - wrong URL, expected %v, got %v", i, tc.expectedURL, e.URL)
- }
- }
- if !found {
- t.Fatalf("Test %d - failed, could not find name %v", i, tc.expectedName)
- }
- }()
- }
-}
diff --git a/caddyhttp/browse/setup.go b/caddyhttp/browse/setup.go
deleted file mode 100644
index 0abf728efdd..00000000000
--- a/caddyhttp/browse/setup.go
+++ /dev/null
@@ -1,488 +0,0 @@
-package browse
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "text/template"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/staticfiles"
-)
-
-func init() {
- caddy.RegisterPlugin("browse", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Browse middleware instance.
-func setup(c *caddy.Controller) error {
- configs, err := browseParse(c)
- if err != nil {
- return err
- }
-
- b := Browse{
- Configs: configs,
- IgnoreIndexes: false,
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- b.Next = next
- return b
- })
-
- return nil
-}
-
-func browseParse(c *caddy.Controller) ([]Config, error) {
- var configs []Config
-
- cfg := httpserver.GetConfig(c)
-
- appendCfg := func(bc Config) error {
- for _, c := range configs {
- if c.PathScope == bc.PathScope {
- return fmt.Errorf("duplicate browsing config for %s", c.PathScope)
- }
- }
- configs = append(configs, bc)
- return nil
- }
-
- for c.Next() {
- var bc Config
-
- // First argument is directory to allow browsing; default is site root
- if c.NextArg() {
- bc.PathScope = c.Val()
- } else {
- bc.PathScope = "/"
- }
-
- bc.Fs = staticfiles.FileServer{
- Root: http.Dir(cfg.Root),
- Hide: cfg.HiddenFiles,
- }
-
- // Second argument would be the template file to use
- var tplText string
- if c.NextArg() {
- tplBytes, err := ioutil.ReadFile(c.Val())
- if err != nil {
- return configs, err
- }
- tplText = string(tplBytes)
- } else {
- tplText = defaultTemplate
- }
-
- // Build the template
- tpl, err := template.New("listing").Parse(tplText)
- if err != nil {
- return configs, err
- }
- bc.Template = tpl
-
- // Save configuration
- err = appendCfg(bc)
- if err != nil {
- return configs, err
- }
- }
-
- return configs, nil
-}
-
-// The default template to use when serving up directory listings
-const defaultTemplate = `
-
-
- {{html .Name}}
-
-
-
-
-
-
-
-
-
- {{.NumDirs}} director{{if eq 1 .NumDirs}}y{{else}}ies{{end}}
- {{.NumFiles}} file{{if ne 1 .NumFiles}}s{{end}}
- {{- if ne 0 .ItemsLimitedTo}}
- (of which only {{.ItemsLimitedTo}} are displayed)
- {{- end}}
-
-
-
-
-
-
-
-
- {{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}}
-
- {{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}}
-
- {{- else}}
-
- {{- end}}
-
- {{- if and (eq .Sort "name") (ne .Order "desc")}}
- Name
- {{- else if and (eq .Sort "name") (ne .Order "asc")}}
- Name
- {{- else}}
- Name
- {{- end}}
-
-
- {{- if and (eq .Sort "size") (ne .Order "desc")}}
- Size
- {{- else if and (eq .Sort "size") (ne .Order "asc")}}
- Size
- {{- else}}
- Size
- {{- end}}
-
-
- {{- if and (eq .Sort "time") (ne .Order "desc")}}
- Modified
- {{- else if and (eq .Sort "time") (ne .Order "asc")}}
- Modified
- {{- else}}
- Modified
- {{- end}}
-
-{{range .Items}}
-{{.Name}}
-{{end}}
-
-
diff --git a/caddyhttp/browse/testdata/photos/hidden.html b/caddyhttp/browse/testdata/photos/hidden.html
deleted file mode 100644
index e0f5c6c20a5..00000000000
--- a/caddyhttp/browse/testdata/photos/hidden.html
+++ /dev/null
@@ -1 +0,0 @@
-Should be hidden
diff --git a/caddyhttp/browse/testdata/photos/test.html b/caddyhttp/browse/testdata/photos/test.html
deleted file mode 100644
index 40535a2234e..00000000000
--- a/caddyhttp/browse/testdata/photos/test.html
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-Test
-
-
-
-
diff --git a/caddyhttp/browse/testdata/photos/test1/test.html b/caddyhttp/browse/testdata/photos/test1/test.html
deleted file mode 100644
index 40535a2234e..00000000000
--- a/caddyhttp/browse/testdata/photos/test1/test.html
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-Test
-
-
-
-
diff --git a/caddyhttp/browse/testdata/photos/test2.html b/caddyhttp/browse/testdata/photos/test2.html
deleted file mode 100644
index 8e10c578040..00000000000
--- a/caddyhttp/browse/testdata/photos/test2.html
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-Test 2
-
-
-
-
diff --git a/caddyhttp/browse/testdata/photos/test3.html b/caddyhttp/browse/testdata/photos/test3.html
deleted file mode 100644
index 6c70af2fa67..00000000000
--- a/caddyhttp/browse/testdata/photos/test3.html
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
\ No newline at end of file
diff --git a/caddyhttp/caddyhttp.go b/caddyhttp/caddyhttp.go
deleted file mode 100644
index 99215bdb74a..00000000000
--- a/caddyhttp/caddyhttp.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package caddyhttp
-
-import (
- // plug in the server
- _ "github.com/mholt/caddy/caddyhttp/httpserver"
-
- // plug in the standard directives
- _ "github.com/mholt/caddy/caddyhttp/basicauth"
- _ "github.com/mholt/caddy/caddyhttp/bind"
- _ "github.com/mholt/caddy/caddyhttp/browse"
- _ "github.com/mholt/caddy/caddyhttp/errors"
- _ "github.com/mholt/caddy/caddyhttp/expvar"
- _ "github.com/mholt/caddy/caddyhttp/extensions"
- _ "github.com/mholt/caddy/caddyhttp/fastcgi"
- _ "github.com/mholt/caddy/caddyhttp/gzip"
- _ "github.com/mholt/caddy/caddyhttp/header"
- _ "github.com/mholt/caddy/caddyhttp/index"
- _ "github.com/mholt/caddy/caddyhttp/internalsrv"
- _ "github.com/mholt/caddy/caddyhttp/limits"
- _ "github.com/mholt/caddy/caddyhttp/log"
- _ "github.com/mholt/caddy/caddyhttp/markdown"
- _ "github.com/mholt/caddy/caddyhttp/mime"
- _ "github.com/mholt/caddy/caddyhttp/pprof"
- _ "github.com/mholt/caddy/caddyhttp/proxy"
- _ "github.com/mholt/caddy/caddyhttp/push"
- _ "github.com/mholt/caddy/caddyhttp/redirect"
- _ "github.com/mholt/caddy/caddyhttp/requestid"
- _ "github.com/mholt/caddy/caddyhttp/rewrite"
- _ "github.com/mholt/caddy/caddyhttp/root"
- _ "github.com/mholt/caddy/caddyhttp/status"
- _ "github.com/mholt/caddy/caddyhttp/templates"
- _ "github.com/mholt/caddy/caddyhttp/timeouts"
- _ "github.com/mholt/caddy/caddyhttp/websocket"
- _ "github.com/mholt/caddy/startupshutdown"
-)
diff --git a/caddyhttp/caddyhttp_test.go b/caddyhttp/caddyhttp_test.go
deleted file mode 100644
index 99ffdbefa0f..00000000000
--- a/caddyhttp/caddyhttp_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package caddyhttp
-
-import (
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
-)
-
-// TODO: this test could be improved; the purpose is to
-// ensure that the standard plugins are in fact plugged in
-// and registered properly; this is a quick/naive way to do it.
-func TestStandardPlugins(t *testing.T) {
- numStandardPlugins := 32 // importing caddyhttp plugs in this many plugins
- s := caddy.DescribePlugins()
- if got, want := strings.Count(s, "\n"), numStandardPlugins+5; got != want {
- t.Errorf("Expected all standard plugins to be plugged in, got:\n%s", s)
- }
-}
diff --git a/caddyhttp/errors/errors.go b/caddyhttp/errors/errors.go
deleted file mode 100644
index a3412ce5020..00000000000
--- a/caddyhttp/errors/errors.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Package errors implements an HTTP error handling middleware.
-package errors
-
-import (
- "fmt"
- "io"
- "net/http"
- "os"
- "runtime"
- "strings"
- "time"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("errors", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// ErrorHandler handles HTTP errors (and errors from other middleware).
-type ErrorHandler struct {
- Next httpserver.Handler
- GenericErrorPage string // default error page filename
- ErrorPages map[int]string // map of status code to filename
- Log *httpserver.Logger
- Debug bool // if true, errors are written out to client rather than to a log
-}
-
-func (h ErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- defer h.recovery(w, r)
-
- status, err := h.Next.ServeHTTP(w, r)
-
- if err != nil {
- errMsg := fmt.Sprintf("%s [ERROR %d %s] %v", time.Now().Format(timeFormat), status, r.URL.Path, err)
- if h.Debug {
- // Write error to response instead of to log
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.WriteHeader(status)
- fmt.Fprintln(w, errMsg)
- return 0, err // returning 0 signals that a response has been written
- }
- h.Log.Println(errMsg)
- }
-
- if status >= 400 {
- h.errorPage(w, r, status)
- return 0, err
- }
-
- return status, err
-}
-
-// errorPage serves a static error page to w according to the status
-// code. If there is an error serving the error page, a plaintext error
-// message is written instead, and the extra error is logged.
-func (h ErrorHandler) errorPage(w http.ResponseWriter, r *http.Request, code int) {
- // See if an error page for this status code was specified
- if pagePath, ok := h.findErrorPage(code); ok {
- // Try to open it
- errorPage, err := os.Open(pagePath)
- if err != nil {
- // An additional error handling an error...
- h.Log.Printf("%s [NOTICE %d %s] could not load error page: %v",
- time.Now().Format(timeFormat), code, r.URL.String(), err)
- httpserver.DefaultErrorFunc(w, r, code)
- return
- }
- defer errorPage.Close()
-
- // Copy the page body into the response
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- w.WriteHeader(code)
- _, err = io.Copy(w, errorPage)
-
- if err != nil {
- // Epic fail... sigh.
- h.Log.Printf("%s [NOTICE %d %s] could not respond with %s: %v",
- time.Now().Format(timeFormat), code, r.URL.String(), pagePath, err)
- httpserver.DefaultErrorFunc(w, r, code)
- }
-
- return
- }
-
- // Default error response
- httpserver.DefaultErrorFunc(w, r, code)
-}
-
-func (h ErrorHandler) findErrorPage(code int) (string, bool) {
- if pagePath, ok := h.ErrorPages[code]; ok {
- return pagePath, true
- }
-
- if h.GenericErrorPage != "" {
- return h.GenericErrorPage, true
- }
-
- return "", false
-}
-
-func (h ErrorHandler) recovery(w http.ResponseWriter, r *http.Request) {
- rec := recover()
- if rec == nil {
- return
- }
-
- // Obtain source of panic
- // From: https://gist.github.com/swdunlop/9629168
- var name, file string // function name, file name
- var line int
- var pc [16]uintptr
- n := runtime.Callers(3, pc[:])
- for _, pc := range pc[:n] {
- fn := runtime.FuncForPC(pc)
- if fn == nil {
- continue
- }
- file, line = fn.FileLine(pc)
- name = fn.Name()
- if !strings.HasPrefix(name, "runtime.") {
- break
- }
- }
-
- // Trim file path
- delim := "/caddy/"
- pkgPathPos := strings.Index(file, delim)
- if pkgPathPos > -1 && len(file) > pkgPathPos+len(delim) {
- file = file[pkgPathPos+len(delim):]
- }
-
- panicMsg := fmt.Sprintf("%s [PANIC %s] %s:%d - %v", time.Now().Format(timeFormat), r.URL.String(), file, line, rec)
- if h.Debug {
- // Write error and stack trace to the response rather than to a log
- var stackBuf [4096]byte
- stack := stackBuf[:runtime.Stack(stackBuf[:], false)]
- httpserver.WriteTextResponse(w, http.StatusInternalServerError, fmt.Sprintf("%s\n\n%s", panicMsg, stack))
- } else {
- // Currently we don't use the function name, since file:line is more conventional
- h.Log.Printf(panicMsg)
- h.errorPage(w, r, http.StatusInternalServerError)
- }
-}
-
-const timeFormat = "02/Jan/2006:15:04:05 -0700"
diff --git a/caddyhttp/errors/errors_test.go b/caddyhttp/errors/errors_test.go
deleted file mode 100644
index 4833ecb9a42..00000000000
--- a/caddyhttp/errors/errors_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package errors
-
-import (
- "bytes"
- "errors"
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestErrors(t *testing.T) {
- // create a temporary page
- const content = "This is a error page"
-
- path, err := createErrorPageFile("errors_test.html", content)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(path)
-
- buf := bytes.Buffer{}
- em := ErrorHandler{
- ErrorPages: map[int]string{
- http.StatusNotFound: path,
- http.StatusForbidden: "not_exist_file",
- },
- Log: httpserver.NewTestLogger(&buf),
- }
- _, notExistErr := os.Open("not_exist_file")
-
- testErr := errors.New("test error")
- tests := []struct {
- next httpserver.Handler
- expectedCode int
- expectedBody string
- expectedLog string
- expectedErr error
- }{
- {
- next: genErrorHandler(http.StatusOK, nil, "normal"),
- expectedCode: http.StatusOK,
- expectedBody: "normal",
- expectedLog: "",
- expectedErr: nil,
- },
- {
- next: genErrorHandler(http.StatusMovedPermanently, testErr, ""),
- expectedCode: http.StatusMovedPermanently,
- expectedBody: "",
- expectedLog: fmt.Sprintf("[ERROR %d %s] %v\n", http.StatusMovedPermanently, "/", testErr),
- expectedErr: testErr,
- },
- {
- next: genErrorHandler(http.StatusBadRequest, nil, ""),
- expectedCode: 0,
- expectedBody: fmt.Sprintf("%d %s\n", http.StatusBadRequest,
- http.StatusText(http.StatusBadRequest)),
- expectedLog: "",
- expectedErr: nil,
- },
- {
- next: genErrorHandler(http.StatusNotFound, nil, ""),
- expectedCode: 0,
- expectedBody: content,
- expectedLog: "",
- expectedErr: nil,
- },
- {
- next: genErrorHandler(http.StatusForbidden, nil, ""),
- expectedCode: 0,
- expectedBody: fmt.Sprintf("%d %s\n", http.StatusForbidden,
- http.StatusText(http.StatusForbidden)),
- expectedLog: fmt.Sprintf("[NOTICE %d /] could not load error page: %v\n",
- http.StatusForbidden, notExistErr),
- expectedErr: nil,
- },
- }
-
- req, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
- for i, test := range tests {
- em.Next = test.next
- buf.Reset()
- rec := httptest.NewRecorder()
- code, err := em.ServeHTTP(rec, req)
-
- if err != test.expectedErr {
- t.Errorf("Test %d: Expected error %v, but got %v",
- i, test.expectedErr, err)
- }
- if code != test.expectedCode {
- t.Errorf("Test %d: Expected status code %d, but got %d",
- i, test.expectedCode, code)
- }
- if body := rec.Body.String(); body != test.expectedBody {
- t.Errorf("Test %d: Expected body %q, but got %q",
- i, test.expectedBody, body)
- }
- if log := buf.String(); !strings.Contains(log, test.expectedLog) {
- t.Errorf("Test %d: Expected log %q, but got %q",
- i, test.expectedLog, log)
- }
- }
-}
-
-func TestVisibleErrorWithPanic(t *testing.T) {
- const panicMsg = "I'm a panic"
- eh := ErrorHandler{
- ErrorPages: make(map[int]string),
- Debug: true,
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- panic(panicMsg)
- }),
- }
-
- req, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
- rec := httptest.NewRecorder()
-
- code, err := eh.ServeHTTP(rec, req)
-
- if code != 0 {
- t.Errorf("Expected error handler to return 0 (it should write to response), got status %d", code)
- }
- if err != nil {
- t.Errorf("Expected error handler to return nil error (it should panic!), but got '%v'", err)
- }
-
- body := rec.Body.String()
-
- if !strings.Contains(body, "[PANIC /] caddyhttp/errors/errors_test.go") {
- t.Errorf("Expected response body to contain error log line, but it didn't:\n%s", body)
- }
- if !strings.Contains(body, panicMsg) {
- t.Errorf("Expected response body to contain panic message, but it didn't:\n%s", body)
- }
- if len(body) < 500 {
- t.Errorf("Expected response body to contain stack trace, but it was too short: len=%d", len(body))
- }
-}
-
-func TestGenericErrorPage(t *testing.T) {
- // create temporary generic error page
- const genericErrorContent = "This is a generic error page"
-
- genericErrorPagePath, err := createErrorPageFile("generic_error_test.html", genericErrorContent)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(genericErrorPagePath)
-
- // create temporary error page
- const notFoundErrorContent = "This is a error page"
-
- notFoundErrorPagePath, err := createErrorPageFile("not_found.html", notFoundErrorContent)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(notFoundErrorPagePath)
-
- buf := bytes.Buffer{}
- em := ErrorHandler{
- GenericErrorPage: genericErrorPagePath,
- ErrorPages: map[int]string{
- http.StatusNotFound: notFoundErrorPagePath,
- },
- Log: httpserver.NewTestLogger(&buf),
- }
-
- tests := []struct {
- next httpserver.Handler
- expectedCode int
- expectedBody string
- expectedLog string
- expectedErr error
- }{
- {
- next: genErrorHandler(http.StatusNotFound, nil, ""),
- expectedCode: 0,
- expectedBody: notFoundErrorContent,
- expectedLog: "",
- expectedErr: nil,
- },
- {
- next: genErrorHandler(http.StatusInternalServerError, nil, ""),
- expectedCode: 0,
- expectedBody: genericErrorContent,
- expectedLog: "",
- expectedErr: nil,
- },
- }
-
- req, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- for i, test := range tests {
- em.Next = test.next
- buf.Reset()
- rec := httptest.NewRecorder()
- code, err := em.ServeHTTP(rec, req)
-
- if err != test.expectedErr {
- t.Errorf("Test %d: Expected error %v, but got %v",
- i, test.expectedErr, err)
- }
- if code != test.expectedCode {
- t.Errorf("Test %d: Expected status code %d, but got %d",
- i, test.expectedCode, code)
- }
- if body := rec.Body.String(); body != test.expectedBody {
- t.Errorf("Test %d: Expected body %q, but got %q",
- i, test.expectedBody, body)
- }
- if log := buf.String(); !strings.Contains(log, test.expectedLog) {
- t.Errorf("Test %d: Expected log %q, but got %q",
- i, test.expectedLog, log)
- }
- }
-}
-
-func genErrorHandler(status int, err error, body string) httpserver.Handler {
- return httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- if len(body) > 0 {
- w.Header().Set("Content-Length", strconv.Itoa(len(body)))
- fmt.Fprint(w, body)
- }
- return status, err
- })
-}
-
-func createErrorPageFile(name string, content string) (string, error) {
- errorPageFilePath := filepath.Join(os.TempDir(), name)
- f, err := os.Create(errorPageFilePath)
- if err != nil {
- return "", err
- }
-
- _, err = f.WriteString(content)
- if err != nil {
- return "", err
- }
- f.Close()
-
- return errorPageFilePath, nil
-}
diff --git a/caddyhttp/errors/setup.go b/caddyhttp/errors/setup.go
deleted file mode 100644
index 7844841579c..00000000000
--- a/caddyhttp/errors/setup.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package errors
-
-import (
- "log"
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// setup configures a new errors middleware instance.
-func setup(c *caddy.Controller) error {
- handler, err := errorsParse(c)
-
- if err != nil {
- return err
- }
-
- handler.Log.Attach(c)
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- handler.Next = next
- return handler
- })
-
- return nil
-}
-
-func errorsParse(c *caddy.Controller) (*ErrorHandler, error) {
-
- // Very important that we make a pointer because the startup
- // function that opens the log file must have access to the
- // same instance of the handler, not a copy.
- handler := &ErrorHandler{
- ErrorPages: make(map[int]string),
- Log: &httpserver.Logger{},
- }
-
- cfg := httpserver.GetConfig(c)
-
- optionalBlock := func() error {
- for c.NextBlock() {
-
- what := c.Val()
- where := c.RemainingArgs()
-
- if httpserver.IsLogRollerSubdirective(what) {
- var err error
- err = httpserver.ParseRoller(handler.Log.Roller, what, where...)
- if err != nil {
- return err
- }
- } else {
- if len(where) != 1 {
- return c.ArgErr()
- }
- where := where[0]
-
- // Error page; ensure it exists
- if !filepath.IsAbs(where) {
- where = filepath.Join(cfg.Root, where)
- }
-
- f, err := os.Open(where)
- if err != nil {
- log.Printf("[WARNING] Unable to open error page '%s': %v", where, err)
- }
- f.Close()
-
- if what == "*" {
- if handler.GenericErrorPage != "" {
- return c.Errf("Duplicate status code entry: %s", what)
- }
- handler.GenericErrorPage = where
- } else {
- whatInt, err := strconv.Atoi(what)
- if err != nil {
- return c.Err("Expecting a numeric status code or '*', got '" + what + "'")
- }
-
- if _, exists := handler.ErrorPages[whatInt]; exists {
- return c.Errf("Duplicate status code entry: %s", what)
- }
-
- handler.ErrorPages[whatInt] = where
- }
- }
- }
- return nil
- }
-
- for c.Next() {
- // weird hack to avoid having the handler values overwritten.
- if c.Val() == "}" {
- continue
- }
-
- args := c.RemainingArgs()
-
- if len(args) == 1 {
- switch args[0] {
- case "visible":
- handler.Debug = true
- default:
- handler.Log.Output = args[0]
- handler.Log.Roller = httpserver.DefaultLogRoller()
- }
- }
-
- // Configuration may be in a block
- err := optionalBlock()
- if err != nil {
- return handler, err
- }
- }
-
- return handler, nil
-}
diff --git a/caddyhttp/errors/setup_test.go b/caddyhttp/errors/setup_test.go
deleted file mode 100644
index db92f2b18ec..00000000000
--- a/caddyhttp/errors/setup_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package errors
-
-import (
- "path/filepath"
- "reflect"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `errors`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middlewares, was nil instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(*ErrorHandler)
- if !ok {
- t.Fatalf("Expected handler to be type ErrorHandler, got: %#v", handler)
- }
-
- expectedLogger := &httpserver.Logger{}
-
- if !reflect.DeepEqual(expectedLogger, myHandler.Log) {
- t.Errorf("Expected '%v' as the default Log, got: '%v'", expectedLogger, myHandler.Log)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-
- // Test Startup function -- TODO
- // if len(c.Startup) == 0 {
- // t.Fatal("Expected 1 startup function, had 0")
- // }
- // c.Startup[0]()
- // if myHandler.Log == nil {
- // t.Error("Expected Log to be non-nil after startup because Debug is not enabled")
- // }
-}
-
-func TestErrorsParse(t *testing.T) {
- testAbs, err := filepath.Abs("./404.html")
- if err != nil {
- t.Error(err)
- }
- tests := []struct {
- inputErrorsRules string
- shouldErr bool
- expectedErrorHandler ErrorHandler
- }{
- {`errors`, false, ErrorHandler{
- ErrorPages: map[int]string{},
- Log: &httpserver.Logger{},
- }},
- {`errors errors.txt`, false, ErrorHandler{
- ErrorPages: map[int]string{},
- Log: &httpserver.Logger{
- Output: "errors.txt",
- Roller: httpserver.DefaultLogRoller(),
- },
- }},
- {`errors visible`, false, ErrorHandler{
- ErrorPages: map[int]string{},
- Debug: true,
- Log: &httpserver.Logger{},
- }},
- {`errors errors.txt {
- 404 404.html
- 500 500.html
-}`, false, ErrorHandler{
- ErrorPages: map[int]string{
- 404: "404.html",
- 500: "500.html",
- },
- Log: &httpserver.Logger{
- Output: "errors.txt",
- Roller: httpserver.DefaultLogRoller(),
- },
- }},
- {`errors errors.txt {
- rotate_size 2
- rotate_age 10
- rotate_keep 3
- rotate_compress
- }`, false, ErrorHandler{
- ErrorPages: map[int]string{},
- Log: &httpserver.Logger{
- Output: "errors.txt", Roller: &httpserver.LogRoller{
- MaxSize: 2,
- MaxAge: 10,
- MaxBackups: 3,
- Compress: true,
- LocalTime: true,
- },
- },
- }},
- {`errors errors.txt {
- rotate_size 3
- rotate_age 11
- rotate_keep 5
- 404 404.html
- 503 503.html
-}`, false, ErrorHandler{
- ErrorPages: map[int]string{
- 404: "404.html",
- 503: "503.html",
- },
- Log: &httpserver.Logger{
- Output: "errors.txt",
- Roller: &httpserver.LogRoller{
- MaxSize: 3,
- MaxAge: 11,
- MaxBackups: 5,
- Compress: false,
- LocalTime: true,
- },
- },
- }},
- {`errors errors.txt {
- * generic_error.html
- 404 404.html
- 503 503.html
-}`, false, ErrorHandler{
- Log: &httpserver.Logger{
- Output: "errors.txt",
- Roller: httpserver.DefaultLogRoller(),
- },
- GenericErrorPage: "generic_error.html",
- ErrorPages: map[int]string{
- 404: "404.html",
- 503: "503.html",
- },
- }},
- // test absolute file path
- {`errors {
- 404 ` + testAbs + `
- }`,
- false, ErrorHandler{
- ErrorPages: map[int]string{
- 404: testAbs,
- },
- Log: &httpserver.Logger{},
- }},
- {`errors errors.txt { rotate_size 2 rotate_age 10 rotate_keep 3 rotate_compress }`,
- true, ErrorHandler{ErrorPages: map[int]string{}, Log: &httpserver.Logger{}}},
- {`errors errors.txt {
- rotate_compress invalid
- }`,
- true, ErrorHandler{ErrorPages: map[int]string{}, Log: &httpserver.Logger{}}},
- // Next two test cases is the detection of duplicate status codes
- {`errors {
- 503 503.html
- 503 503.html
- }`, true, ErrorHandler{ErrorPages: map[int]string{}, Log: &httpserver.Logger{}}},
-
- {`errors {
- * generic_error.html
- * generic_error.html
- }`, true, ErrorHandler{ErrorPages: map[int]string{}, Log: &httpserver.Logger{}}},
- }
-
- for i, test := range tests {
- actualErrorsRule, err := errorsParse(caddy.NewTestController("http", test.inputErrorsRules))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- } else if err != nil && test.shouldErr {
- continue
- }
- if !reflect.DeepEqual(actualErrorsRule, &test.expectedErrorHandler) {
- t.Errorf("Test %d expect %v, but got %v", i,
- test.expectedErrorHandler, actualErrorsRule)
- }
- }
-}
diff --git a/caddyhttp/expvar/expvar.go b/caddyhttp/expvar/expvar.go
deleted file mode 100644
index d3107a0489e..00000000000
--- a/caddyhttp/expvar/expvar.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package expvar
-
-import (
- "expvar"
- "fmt"
- "net/http"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// ExpVar is a simple struct to hold expvar's configuration
-type ExpVar struct {
- Next httpserver.Handler
- Resource Resource
-}
-
-// ServeHTTP handles requests to expvar's configured entry point with
-// expvar, or passes all other requests up the chain.
-func (e ExpVar) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- if httpserver.Path(r.URL.Path).Matches(string(e.Resource)) {
- expvarHandler(w, r)
- return 0, nil
- }
- return e.Next.ServeHTTP(w, r)
-}
-
-// expvarHandler returns a JSON object will all the published variables.
-//
-// This is lifted straight from the expvar package.
-func expvarHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-// Resource contains the path to the expvar entry point
-type Resource string
diff --git a/caddyhttp/expvar/expvar_test.go b/caddyhttp/expvar/expvar_test.go
deleted file mode 100644
index dfc7cb31184..00000000000
--- a/caddyhttp/expvar/expvar_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package expvar
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestExpVar(t *testing.T) {
- rw := ExpVar{
- Next: httpserver.HandlerFunc(contentHandler),
- Resource: "/d/v",
- }
-
- tests := []struct {
- from string
- result int
- }{
- {"/d/v", 0},
- {"/x/y", http.StatusOK},
- }
-
- for i, test := range tests {
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request %v", i, err)
- }
- rec := httptest.NewRecorder()
- result, err := rw.ServeHTTP(rec, req)
- if err != nil {
- t.Fatalf("Test %d: Could not ServeHTTP %v", i, err)
- }
- if result != test.result {
- t.Errorf("Test %d: Expected Header '%d' but was '%d'",
- i, test.result, result)
- }
- }
-}
-
-func contentHandler(w http.ResponseWriter, r *http.Request) (int, error) {
- fmt.Fprintf(w, r.URL.String())
- return http.StatusOK, nil
-}
diff --git a/caddyhttp/expvar/setup.go b/caddyhttp/expvar/setup.go
deleted file mode 100644
index e411844e917..00000000000
--- a/caddyhttp/expvar/setup.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package expvar
-
-import (
- "expvar"
- "runtime"
- "sync"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("expvar", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new ExpVar middleware instance.
-func setup(c *caddy.Controller) error {
- resource, err := expVarParse(c)
- if err != nil {
- return err
- }
-
- // publish any extra information/metrics we may want to capture
- publishExtraVars()
-
- ev := ExpVar{Resource: resource}
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- ev.Next = next
- return ev
- })
-
- return nil
-}
-
-func expVarParse(c *caddy.Controller) (Resource, error) {
- var resource Resource
- var err error
-
- for c.Next() {
- args := c.RemainingArgs()
- switch len(args) {
- case 0:
- resource = Resource(defaultExpvarPath)
- case 1:
- resource = Resource(args[0])
- default:
- return resource, c.ArgErr()
- }
- }
-
- return resource, err
-}
-
-func publishExtraVars() {
- // By using sync.Once instead of an init() function, we don't clutter
- // the app's expvar export unnecessarily, or risk colliding with it.
- publishOnce.Do(func() {
- expvar.Publish("Goroutines", expvar.Func(func() interface{} {
- return runtime.NumGoroutine()
- }))
- })
-}
-
-var publishOnce sync.Once // publishing variables should only be done once
-var defaultExpvarPath = "/debug/vars"
diff --git a/caddyhttp/expvar/setup_test.go b/caddyhttp/expvar/setup_test.go
deleted file mode 100644
index b53719ebcd4..00000000000
--- a/caddyhttp/expvar/setup_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package expvar
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `expvar`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- c = caddy.NewTestController("http", `expvar /d/v`)
- err = setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids = httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(ExpVar)
- if !ok {
- t.Fatalf("Expected handler to be type ExpVar, got: %#v", handler)
- }
- if myHandler.Resource != "/d/v" {
- t.Errorf("Expected /d/v as expvar resource")
- }
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-}
diff --git a/caddyhttp/extensions/ext.go b/caddyhttp/extensions/ext.go
deleted file mode 100644
index 2c02fa73473..00000000000
--- a/caddyhttp/extensions/ext.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Package extensions contains middleware for clean URLs.
-//
-// The root path of the site is passed in as well as possible extensions
-// to try internally for paths requested that don't match an existing
-// resource. The first path+ext combination that matches a valid file
-// will be used.
-package extensions
-
-import (
- "net/http"
- "os"
- "path"
- "strings"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Ext can assume an extension from clean URLs.
-// It tries extensions in the order listed in Extensions.
-type Ext struct {
- // Next handler in the chain
- Next httpserver.Handler
-
- // Path to site root
- Root string
-
- // List of extensions to try
- Extensions []string
-}
-
-// ServeHTTP implements the httpserver.Handler interface.
-func (e Ext) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- urlpath := strings.TrimSuffix(r.URL.Path, "/")
- if len(r.URL.Path) > 0 && path.Ext(urlpath) == "" && r.URL.Path[len(r.URL.Path)-1] != '/' {
- for _, ext := range e.Extensions {
- _, err := os.Stat(httpserver.SafePath(e.Root, urlpath) + ext)
- if err == nil {
- r.URL.Path = urlpath + ext
- break
- }
- }
- }
- return e.Next.ServeHTTP(w, r)
-}
diff --git a/caddyhttp/extensions/setup.go b/caddyhttp/extensions/setup.go
deleted file mode 100644
index 5cec873a0c4..00000000000
--- a/caddyhttp/extensions/setup.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package extensions
-
-import (
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("ext", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new instance of 'extensions' middleware for clean URLs.
-func setup(c *caddy.Controller) error {
- cfg := httpserver.GetConfig(c)
- root := cfg.Root
-
- exts, err := extParse(c)
- if err != nil {
- return err
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Ext{
- Next: next,
- Extensions: exts,
- Root: root,
- }
- })
-
- return nil
-}
-
-// extParse sets up an instance of extension middleware
-// from a middleware controller and returns a list of extensions.
-func extParse(c *caddy.Controller) ([]string, error) {
- var exts []string
-
- for c.Next() {
- // At least one extension is required
- if !c.NextArg() {
- return exts, c.ArgErr()
- }
- exts = append(exts, c.Val())
-
- // Tack on any other extensions that may have been listed
- exts = append(exts, c.RemainingArgs()...)
- }
-
- return exts, nil
-}
diff --git a/caddyhttp/extensions/setup_test.go b/caddyhttp/extensions/setup_test.go
deleted file mode 100644
index 26c50e3c44a..00000000000
--- a/caddyhttp/extensions/setup_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package extensions
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `ext .html .htm .php`)
- err := setup(c)
- if err != nil {
- t.Fatalf("Expected no errors, got: %v", err)
- }
-
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, had 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Ext)
-
- if !ok {
- t.Fatalf("Expected handler to be type Ext, got: %#v", handler)
- }
-
- if myHandler.Extensions[0] != ".html" {
- t.Errorf("Expected .html in the list of Extensions")
- }
- if myHandler.Extensions[1] != ".htm" {
- t.Errorf("Expected .htm in the list of Extensions")
- }
- if myHandler.Extensions[2] != ".php" {
- t.Errorf("Expected .php in the list of Extensions")
- }
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-
-}
-
-func TestExtParse(t *testing.T) {
- tests := []struct {
- inputExts string
- shouldErr bool
- expectedExts []string
- }{
- {`ext .html .htm .php`, false, []string{".html", ".htm", ".php"}},
- {`ext .php .html .xml`, false, []string{".php", ".html", ".xml"}},
- {`ext .txt .php .xml`, false, []string{".txt", ".php", ".xml"}},
- }
- for i, test := range tests {
- actualExts, err := extParse(caddy.NewTestController("http", test.inputExts))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
-
- if len(actualExts) != len(test.expectedExts) {
- t.Fatalf("Test %d expected %d rules, but got %d",
- i, len(test.expectedExts), len(actualExts))
- }
- for j, actualExt := range actualExts {
- if actualExt != test.expectedExts[j] {
- t.Fatalf("Test %d expected %dth extension to be %s , but got %s",
- i, j, test.expectedExts[j], actualExt)
- }
- }
- }
-
-}
diff --git a/caddyhttp/fastcgi/fastcgi.go b/caddyhttp/fastcgi/fastcgi.go
deleted file mode 100644
index 550a795b61a..00000000000
--- a/caddyhttp/fastcgi/fastcgi.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Package fastcgi has middleware that acts as a FastCGI client. Requests
-// that get forwarded to FastCGI stop the middleware execution chain.
-// The most common use for this package is to serve PHP websites via php-fpm.
-package fastcgi
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "net/http"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Handler is a middleware type that can handle requests as a FastCGI client.
-type Handler struct {
- Next httpserver.Handler
- Rules []Rule
- Root string
- FileSys http.FileSystem
-
- // These are sent to CGI scripts in env variables
- SoftwareName string
- SoftwareVersion string
- ServerName string
- ServerPort string
-}
-
-// ServeHTTP satisfies the httpserver.Handler interface.
-func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- for _, rule := range h.Rules {
- // First requirement: Base path must match request path. If it doesn't,
- // we check to make sure the leading slash is not missing, and if so,
- // we check again with it prepended. This is in case people forget
- // a leading slash when performing rewrites, and we don't want to expose
- // the contents of the (likely PHP) script. See issue #1645.
- hpath := httpserver.Path(r.URL.Path)
- if !hpath.Matches(rule.Path) {
- if strings.HasPrefix(string(hpath), "/") {
- // this is a normal-looking path, and it doesn't match; try next rule
- continue
- }
- hpath = httpserver.Path("/" + string(hpath)) // prepend leading slash
- if !hpath.Matches(rule.Path) {
- // even after fixing the request path, it still doesn't match; try next rule
- continue
- }
- }
- // The path must also be allowed (not ignored).
- if !rule.AllowedPath(r.URL.Path) {
- continue
- }
-
- // In addition to matching the path, a request must meet some
- // other criteria before being proxied as FastCGI. For example,
- // we probably want to exclude static assets (CSS, JS, images...)
- // but we also want to be flexible for the script we proxy to.
-
- fpath := r.URL.Path
-
- if idx, ok := httpserver.IndexFile(h.FileSys, fpath, rule.IndexFiles); ok {
- fpath = idx
- // Index file present.
- // If request path cannot be split, return error.
- if !rule.canSplit(fpath) {
- return http.StatusInternalServerError, ErrIndexMissingSplit
- }
- } else {
- // No index file present.
- // If request path cannot be split, ignore request.
- if !rule.canSplit(fpath) {
- continue
- }
- }
-
- // These criteria work well in this order for PHP sites
- if !h.exists(fpath) || fpath[len(fpath)-1] == '/' || strings.HasSuffix(fpath, rule.Ext) {
-
- // Create environment for CGI script
- env, err := h.buildEnv(r, rule, fpath)
- if err != nil {
- return http.StatusInternalServerError, err
- }
-
- // Connect to FastCGI gateway
- network, address := parseAddress(rule.Address())
-
- ctx := context.Background()
- if rule.ConnectTimeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, rule.ConnectTimeout)
- defer cancel()
- }
-
- fcgiBackend, err := DialContext(ctx, network, address)
- if err != nil {
- return http.StatusBadGateway, err
- }
- defer fcgiBackend.Close()
-
- // read/write timeouts
- if err := fcgiBackend.SetReadTimeout(rule.ReadTimeout); err != nil {
- return http.StatusInternalServerError, err
- }
- if err := fcgiBackend.SetSendTimeout(rule.SendTimeout); err != nil {
- return http.StatusInternalServerError, err
- }
-
- var resp *http.Response
-
- var contentLength int64
- // if ContentLength is already set
- if r.ContentLength > 0 {
- contentLength = r.ContentLength
- } else {
- contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
- }
- switch r.Method {
- case "HEAD":
- resp, err = fcgiBackend.Head(env)
- case "GET":
- resp, err = fcgiBackend.Get(env)
- case "OPTIONS":
- resp, err = fcgiBackend.Options(env)
- default:
- resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength)
- }
-
- if resp != nil && resp.Body != nil {
- defer resp.Body.Close()
- }
-
- if err != nil {
- if err, ok := err.(net.Error); ok && err.Timeout() {
- return http.StatusGatewayTimeout, err
- } else if err != io.EOF {
- return http.StatusBadGateway, err
- }
- }
-
- // Write response header
- writeHeader(w, resp)
-
- // Write the response body
- _, err = io.Copy(w, resp.Body)
- if err != nil {
- return http.StatusBadGateway, err
- }
-
- // Log any stderr output from upstream
- if fcgiBackend.stderr.Len() != 0 {
- // Remove trailing newline, error logger already does this.
- err = LogError(strings.TrimSuffix(fcgiBackend.stderr.String(), "\n"))
- }
-
- // Normally we would return the status code if it is an error status (>= 400),
- // however, upstream FastCGI apps don't know about our contract and have
- // probably already written an error page. So we just return 0, indicating
- // that the response body is already written. However, we do return any
- // error value so it can be logged.
- // Note that the proxy middleware works the same way, returning status=0.
- return 0, err
- }
- }
-
- return h.Next.ServeHTTP(w, r)
-}
-
-// parseAddress returns the network and address of fcgiAddress.
-// The first string is the network, "tcp" or "unix", implied from the scheme and address.
-// The second string is fcgiAddress, with scheme prefixes removed.
-// The two returned strings can be used as parameters to the Dial() function.
-func parseAddress(fcgiAddress string) (string, string) {
- // check if address has tcp scheme explicitly set
- if strings.HasPrefix(fcgiAddress, "tcp://") {
- return "tcp", fcgiAddress[len("tcp://"):]
- }
- // check if address has fastcgi scheme explicitly set
- if strings.HasPrefix(fcgiAddress, "fastcgi://") {
- return "tcp", fcgiAddress[len("fastcgi://"):]
- }
- // check if unix socket
- if trim := strings.HasPrefix(fcgiAddress, "unix"); strings.HasPrefix(fcgiAddress, "/") || trim {
- if trim {
- return "unix", fcgiAddress[len("unix:"):]
- }
- return "unix", fcgiAddress
- }
- // default case, a plain tcp address with no scheme
- return "tcp", fcgiAddress
-}
-
-func writeHeader(w http.ResponseWriter, r *http.Response) {
- for key, vals := range r.Header {
- for _, val := range vals {
- w.Header().Add(key, val)
- }
- }
- w.WriteHeader(r.StatusCode)
-}
-
-func (h Handler) exists(path string) bool {
- if _, err := os.Stat(h.Root + path); err == nil {
- return true
- }
- return false
-}
-
-// buildEnv returns a set of CGI environment variables for the request.
-func (h Handler) buildEnv(r *http.Request, rule Rule, fpath string) (map[string]string, error) {
- var env map[string]string
-
- // Get absolute path of requested resource
- absPath := filepath.Join(rule.Root, fpath)
-
- // Separate remote IP and port; more lenient than net.SplitHostPort
- var ip, port string
- if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 {
- ip = r.RemoteAddr[:idx]
- port = r.RemoteAddr[idx+1:]
- } else {
- ip = r.RemoteAddr
- }
-
- // Remove [] from IPv6 addresses
- ip = strings.Replace(ip, "[", "", 1)
- ip = strings.Replace(ip, "]", "", 1)
-
- // Split path in preparation for env variables.
- // Previous rule.canSplit checks ensure this can never be -1.
- splitPos := rule.splitPos(fpath)
-
- // Request has the extension; path was split successfully
- docURI := fpath[:splitPos+len(rule.SplitPath)]
- pathInfo := fpath[splitPos+len(rule.SplitPath):]
- scriptName := fpath
- scriptFilename := absPath
-
- // Strip PATH_INFO from SCRIPT_NAME
- scriptName = strings.TrimSuffix(scriptName, pathInfo)
-
- // Get the request URI from context. The context stores the original URI in case
- // it was changed by a middleware such as rewrite. By default, we pass the
- // original URI in as the value of REQUEST_URI (the user can overwrite this
- // if desired). Most PHP apps seem to want the original URI. Besides, this is
- // how nginx defaults: http://stackoverflow.com/a/12485156/1048862
- reqURL, _ := r.Context().Value(httpserver.OriginalURLCtxKey).(url.URL)
-
- // Retrieve name of remote user that was set by some downstream middleware such as basicauth.
- remoteUser, _ := r.Context().Value(httpserver.RemoteUserCtxKey).(string)
-
- // Some variables are unused but cleared explicitly to prevent
- // the parent environment from interfering.
- env = map[string]string{
- // Variables defined in CGI 1.1 spec
- "AUTH_TYPE": "", // Not used
- "CONTENT_LENGTH": r.Header.Get("Content-Length"),
- "CONTENT_TYPE": r.Header.Get("Content-Type"),
- "GATEWAY_INTERFACE": "CGI/1.1",
- "PATH_INFO": pathInfo,
- "QUERY_STRING": r.URL.RawQuery,
- "REMOTE_ADDR": ip,
- "REMOTE_HOST": ip, // For speed, remote host lookups disabled
- "REMOTE_PORT": port,
- "REMOTE_IDENT": "", // Not used
- "REMOTE_USER": remoteUser,
- "REQUEST_METHOD": r.Method,
- "SERVER_NAME": h.ServerName,
- "SERVER_PORT": h.ServerPort,
- "SERVER_PROTOCOL": r.Proto,
- "SERVER_SOFTWARE": h.SoftwareName + "/" + h.SoftwareVersion,
-
- // Other variables
- "DOCUMENT_ROOT": rule.Root,
- "DOCUMENT_URI": docURI,
- "HTTP_HOST": r.Host, // added here, since not always part of headers
- "REQUEST_URI": reqURL.RequestURI(),
- "SCRIPT_FILENAME": scriptFilename,
- "SCRIPT_NAME": scriptName,
- }
-
- // compliance with the CGI specification requires that
- // PATH_TRANSLATED should only exist if PATH_INFO is defined.
- // Info: https://www.ietf.org/rfc/rfc3875 Page 14
- if env["PATH_INFO"] != "" {
- env["PATH_TRANSLATED"] = filepath.Join(rule.Root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html
- }
-
- // Some web apps rely on knowing HTTPS or not
- if r.TLS != nil {
- env["HTTPS"] = "on"
- }
-
- // Add env variables from config (with support for placeholders in values)
- replacer := httpserver.NewReplacer(r, nil, "")
- for _, envVar := range rule.EnvVars {
- env[envVar[0]] = replacer.Replace(envVar[1])
- }
-
- // Add all HTTP headers to env variables
- for field, val := range r.Header {
- header := strings.ToUpper(field)
- header = headerNameReplacer.Replace(header)
- env["HTTP_"+header] = strings.Join(val, ", ")
- }
- return env, nil
-}
-
-// Rule represents a FastCGI handling rule.
-// It is parsed from the fastcgi directive in the Caddyfile, see setup.go.
-type Rule struct {
- // The base path to match. Required.
- Path string
-
- // upstream load balancer
- balancer
-
- // Always process files with this extension with fastcgi.
- Ext string
-
- // Use this directory as the fastcgi root directory. Defaults to the root
- // directory of the parent virtual host.
- Root string
-
- // The path in the URL will be split into two, with the first piece ending
- // with the value of SplitPath. The first piece will be assumed as the
- // actual resource (CGI script) name, and the second piece will be set to
- // PATH_INFO for the CGI script to use.
- SplitPath string
-
- // If the URL ends with '/' (which indicates a directory), these index
- // files will be tried instead.
- IndexFiles []string
-
- // Environment Variables
- EnvVars [][2]string
-
- // Ignored paths
- IgnoredSubPaths []string
-
- // The duration used to set a deadline when connecting to an upstream.
- ConnectTimeout time.Duration
-
- // The duration used to set a deadline when reading from the FastCGI server.
- ReadTimeout time.Duration
-
- // The duration used to set a deadline when sending to the FastCGI server.
- SendTimeout time.Duration
-}
-
-// balancer is a fastcgi upstream load balancer.
-type balancer interface {
- // Address picks an upstream address from the
- // underlying load balancer.
- Address() string
-}
-
-// roundRobin is a round robin balancer for fastcgi upstreams.
-type roundRobin struct {
- // Known Go bug: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
- // must be first field for 64 bit alignment
- // on x86 and arm.
- index int64
- addresses []string
-}
-
-func (r *roundRobin) Address() string {
- index := atomic.AddInt64(&r.index, 1) % int64(len(r.addresses))
- return r.addresses[index]
-}
-
-// canSplit checks if path can split into two based on rule.SplitPath.
-func (r Rule) canSplit(path string) bool {
- return r.splitPos(path) >= 0
-}
-
-// splitPos returns the index where path should be split
-// based on rule.SplitPath.
-func (r Rule) splitPos(path string) int {
- if httpserver.CaseSensitivePath {
- return strings.Index(path, r.SplitPath)
- }
- return strings.Index(strings.ToLower(path), strings.ToLower(r.SplitPath))
-}
-
-// AllowedPath checks if requestPath is not an ignored path.
-func (r Rule) AllowedPath(requestPath string) bool {
- for _, ignoredSubPath := range r.IgnoredSubPaths {
- if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(r.Path, ignoredSubPath)) {
- return false
- }
- }
- return true
-}
-
-var (
- headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_")
- // ErrIndexMissingSplit describes an index configuration error.
- ErrIndexMissingSplit = errors.New("configured index file(s) must include split value")
-)
-
-// LogError is a non fatal error that allows requests to go through.
-type LogError string
-
-// Error satisfies error interface.
-func (l LogError) Error() string {
- return string(l)
-}
diff --git a/caddyhttp/fastcgi/fastcgi_test.go b/caddyhttp/fastcgi/fastcgi_test.go
deleted file mode 100644
index 6f7afe74297..00000000000
--- a/caddyhttp/fastcgi/fastcgi_test.go
+++ /dev/null
@@ -1,353 +0,0 @@
-package fastcgi
-
-import (
- "context"
- "net"
- "net/http"
- "net/http/fcgi"
- "net/http/httptest"
- "net/url"
- "strconv"
- "sync"
- "testing"
- "time"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestServeHTTP(t *testing.T) {
- body := "This is some test body content"
-
- bodyLenStr := strconv.Itoa(len(body))
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Unable to create listener for test: %v", err)
- }
- defer listener.Close()
- go fcgi.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Length", bodyLenStr)
- w.Write([]byte(body))
- }))
-
- handler := Handler{
- Next: nil,
- Rules: []Rule{{Path: "/", balancer: address(listener.Addr().String())}},
- }
- r, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("Unable to create request: %v", err)
- }
- w := httptest.NewRecorder()
-
- status, err := handler.ServeHTTP(w, r)
-
- if got, want := status, 0; got != want {
- t.Errorf("Expected returned status code to be %d, got %d", want, got)
- }
- if err != nil {
- t.Errorf("Expected nil error, got: %v", err)
- }
- if got, want := w.Header().Get("Content-Length"), bodyLenStr; got != want {
- t.Errorf("Expected Content-Length to be '%s', got: '%s'", want, got)
- }
- if got, want := w.Body.String(), body; got != want {
- t.Errorf("Expected response body to be '%s', got: '%s'", want, got)
- }
-}
-
-func TestRuleParseAddress(t *testing.T) {
- getClientTestTable := []struct {
- rule *Rule
- expectednetwork string
- expectedaddress string
- }{
- {&Rule{balancer: address("tcp://172.17.0.1:9000")}, "tcp", "172.17.0.1:9000"},
- {&Rule{balancer: address("fastcgi://localhost:9000")}, "tcp", "localhost:9000"},
- {&Rule{balancer: address("172.17.0.15")}, "tcp", "172.17.0.15"},
- {&Rule{balancer: address("/my/unix/socket")}, "unix", "/my/unix/socket"},
- {&Rule{balancer: address("unix:/second/unix/socket")}, "unix", "/second/unix/socket"},
- }
-
- for _, entry := range getClientTestTable {
- if actualnetwork, _ := parseAddress(entry.rule.Address()); actualnetwork != entry.expectednetwork {
- t.Errorf("Unexpected network for address string %v. Got %v, expected %v", entry.rule.Address(), actualnetwork, entry.expectednetwork)
- }
- if _, actualaddress := parseAddress(entry.rule.Address()); actualaddress != entry.expectedaddress {
- t.Errorf("Unexpected parsed address for address string %v. Got %v, expected %v", entry.rule.Address(), actualaddress, entry.expectedaddress)
- }
- }
-}
-
-func TestRuleIgnoredPath(t *testing.T) {
- rule := &Rule{
- Path: "/fastcgi",
- IgnoredSubPaths: []string{"/download", "/static"},
- }
- tests := []struct {
- url string
- expected bool
- }{
- {"/fastcgi", true},
- {"/fastcgi/dl", true},
- {"/fastcgi/download", false},
- {"/fastcgi/download/static", false},
- {"/fastcgi/static", false},
- {"/fastcgi/static/download", false},
- {"/fastcgi/something/download", true},
- {"/fastcgi/something/static", true},
- {"/fastcgi//static", false},
- {"/fastcgi//static//download", false},
- {"/fastcgi//download", false},
- }
-
- for i, test := range tests {
- allowed := rule.AllowedPath(test.url)
- if test.expected != allowed {
- t.Errorf("Test %d: expected %v found %v", i, test.expected, allowed)
- }
- }
-}
-
-func TestBuildEnv(t *testing.T) {
- testBuildEnv := func(r *http.Request, rule Rule, fpath string, envExpected map[string]string) {
- var h Handler
- env, err := h.buildEnv(r, rule, fpath)
- if err != nil {
- t.Error("Unexpected error:", err.Error())
- }
- for k, v := range envExpected {
- if env[k] != v {
- t.Errorf("Unexpected %v. Got %v, expected %v", k, env[k], v)
- }
- }
- }
-
- rule := Rule{}
- url, err := url.Parse("http://localhost:2015/fgci_test.php?test=foobar")
- if err != nil {
- t.Error("Unexpected error:", err.Error())
- }
-
- var newReq = func() *http.Request {
- r := http.Request{
- Method: "GET",
- URL: url,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Host: "localhost:2015",
- RemoteAddr: "[2b02:1810:4f2d:9400:70ab:f822:be8a:9093]:51688",
- RequestURI: "/fgci_test.php",
- Header: map[string][]string{
- "Foo": {"Bar", "two"},
- },
- }
- ctx := context.WithValue(r.Context(), httpserver.OriginalURLCtxKey, *r.URL)
- return r.WithContext(ctx)
- }
-
- fpath := "/fgci_test.php"
-
- var newEnv = func() map[string]string {
- return map[string]string{
- "REMOTE_ADDR": "2b02:1810:4f2d:9400:70ab:f822:be8a:9093",
- "REMOTE_PORT": "51688",
- "SERVER_PROTOCOL": "HTTP/1.1",
- "QUERY_STRING": "test=foobar",
- "REQUEST_METHOD": "GET",
- "HTTP_HOST": "localhost:2015",
- }
- }
-
- // request
- var r *http.Request
-
- // expected environment variables
- var envExpected map[string]string
-
- // 1. Test for full canonical IPv6 address
- r = newReq()
- testBuildEnv(r, rule, fpath, envExpected)
-
- // 2. Test for shorthand notation of IPv6 address
- r = newReq()
- r.RemoteAddr = "[::1]:51688"
- envExpected = newEnv()
- envExpected["REMOTE_ADDR"] = "::1"
- testBuildEnv(r, rule, fpath, envExpected)
-
- // 3. Test for IPv4 address
- r = newReq()
- r.RemoteAddr = "192.168.0.10:51688"
- envExpected = newEnv()
- envExpected["REMOTE_ADDR"] = "192.168.0.10"
- testBuildEnv(r, rule, fpath, envExpected)
-
- // 4. Test for environment variable
- r = newReq()
- rule.EnvVars = [][2]string{
- {"HTTP_HOST", "localhost:2016"},
- {"REQUEST_METHOD", "POST"},
- }
- envExpected = newEnv()
- envExpected["HTTP_HOST"] = "localhost:2016"
- envExpected["REQUEST_METHOD"] = "POST"
- testBuildEnv(r, rule, fpath, envExpected)
-
- // 5. Test for environment variable placeholders
- r = newReq()
- rule.EnvVars = [][2]string{
- {"HTTP_HOST", "{host}"},
- {"CUSTOM_URI", "custom_uri{uri}"},
- {"CUSTOM_QUERY", "custom=true&{query}"},
- }
- envExpected = newEnv()
- envExpected["HTTP_HOST"] = "localhost:2015"
- envExpected["CUSTOM_URI"] = "custom_uri/fgci_test.php?test=foobar"
- envExpected["CUSTOM_QUERY"] = "custom=true&test=foobar"
- testBuildEnv(r, rule, fpath, envExpected)
-}
-
-func TestReadTimeout(t *testing.T) {
- tests := []struct {
- sleep time.Duration
- readTimeout time.Duration
- shouldErr bool
- }{
- {75 * time.Millisecond, 50 * time.Millisecond, true},
- {0, -1 * time.Second, true},
- {0, time.Minute, false},
- }
-
- var wg sync.WaitGroup
-
- for i, test := range tests {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Test %d: Unable to create listener for test: %v", i, err)
- }
- defer listener.Close()
-
- handler := Handler{
- Next: nil,
- Rules: []Rule{
- {
- Path: "/",
- balancer: address(listener.Addr().String()),
- ReadTimeout: test.readTimeout,
- },
- },
- }
- r, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("Test %d: Unable to create request: %v", i, err)
- }
- w := httptest.NewRecorder()
-
- wg.Add(1)
- go fcgi.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- time.Sleep(test.sleep)
- w.WriteHeader(http.StatusOK)
- wg.Done()
- }))
-
- got, err := handler.ServeHTTP(w, r)
- if test.shouldErr {
- if err == nil {
- t.Errorf("Test %d: Expected i/o timeout error but had none", i)
- } else if err, ok := err.(net.Error); !ok || !err.Timeout() {
- t.Errorf("Test %d: Expected i/o timeout error, got: '%s'", i, err.Error())
- }
-
- want := http.StatusGatewayTimeout
- if got != want {
- t.Errorf("Test %d: Expected returned status code to be %d, got: %d",
- i, want, got)
- }
- } else if err != nil {
- t.Errorf("Test %d: Expected nil error, got: %v", i, err)
- }
-
- wg.Wait()
- }
-}
-
-func TestSendTimeout(t *testing.T) {
- tests := []struct {
- sendTimeout time.Duration
- shouldErr bool
- }{
- {-1 * time.Second, true},
- {time.Minute, false},
- }
-
- for i, test := range tests {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Test %d: Unable to create listener for test: %v", i, err)
- }
- defer listener.Close()
-
- handler := Handler{
- Next: nil,
- Rules: []Rule{
- {
- Path: "/",
- balancer: address(listener.Addr().String()),
- SendTimeout: test.sendTimeout,
- },
- },
- }
- r, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("Test %d: Unable to create request: %v", i, err)
- }
- w := httptest.NewRecorder()
-
- go fcgi.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
- }))
-
- got, err := handler.ServeHTTP(w, r)
- if test.shouldErr {
- if err == nil {
- t.Errorf("Test %d: Expected i/o timeout error but had none", i)
- } else if err, ok := err.(net.Error); !ok || !err.Timeout() {
- t.Errorf("Test %d: Expected i/o timeout error, got: '%s'", i, err.Error())
- }
-
- want := http.StatusGatewayTimeout
- if got != want {
- t.Errorf("Test %d: Expected returned status code to be %d, got: %d",
- i, want, got)
- }
- } else if err != nil {
- t.Errorf("Test %d: Expected nil error, got: %v", i, err)
- }
- }
-}
-
-func TestBalancer(t *testing.T) {
- tests := [][]string{
- {"localhost", "host.local"},
- {"localhost"},
- {"localhost", "host.local", "example.com"},
- {"localhost", "host.local", "example.com", "127.0.0.1"},
- }
- for i, test := range tests {
- b := address(test...)
- for _, host := range test {
- a := b.Address()
- if a != host {
- t.Errorf("Test %d: expected %s, found %s", i, host, a)
- }
- }
- }
-}
-
-func address(addresses ...string) balancer {
- return &roundRobin{
- addresses: addresses,
- index: -1,
- }
-}
diff --git a/caddyhttp/fastcgi/fcgi_test.php b/caddyhttp/fastcgi/fcgi_test.php
deleted file mode 100644
index 3f5e5f2db20..00000000000
--- a/caddyhttp/fastcgi/fcgi_test.php
+++ /dev/null
@@ -1,79 +0,0 @@
- $val) {
- $md5 = md5($val);
-
- if ($key != $md5) {
- $stat = "FAILED";
- echo "server:err ".$md5." != ".$key."\n";
- }
-
- $length += strlen($key) + strlen($val);
-
- $ret .= $key."(".strlen($key).") ";
- }
- $ret .= "] [";
- foreach ($_FILES as $k0 => $val) {
-
- $error = $val["error"];
- if ($error == UPLOAD_ERR_OK) {
- $tmp_name = $val["tmp_name"];
- $name = $val["name"];
- $datafile = "/tmp/test.go";
- move_uploaded_file($tmp_name, $datafile);
- $md5 = md5_file($datafile);
-
- if ($k0 != $md5) {
- $stat = "FAILED";
- echo "server:err ".$md5." != ".$key."\n";
- }
-
- $length += strlen($k0) + filesize($datafile);
-
- unlink($datafile);
- $ret .= $k0."(".strlen($k0).") ";
- }
- else{
- $stat = "FAILED";
- echo "server:file err ".file_upload_error_message($error)."\n";
- }
- }
- $ret .= "]";
- echo "server:got data length " .$length."\n";
-}
-
-
-echo "-{$stat}-POST(".count($_POST).") FILE(".count($_FILES).")\n";
-
-function file_upload_error_message($error_code) {
- switch ($error_code) {
- case UPLOAD_ERR_INI_SIZE:
- return 'The uploaded file exceeds the upload_max_filesize directive in php.ini';
- case UPLOAD_ERR_FORM_SIZE:
- return 'The uploaded file exceeds the MAX_FILE_SIZE directive that was specified in the HTML form';
- case UPLOAD_ERR_PARTIAL:
- return 'The uploaded file was only partially uploaded';
- case UPLOAD_ERR_NO_FILE:
- return 'No file was uploaded';
- case UPLOAD_ERR_NO_TMP_DIR:
- return 'Missing a temporary folder';
- case UPLOAD_ERR_CANT_WRITE:
- return 'Failed to write file to disk';
- case UPLOAD_ERR_EXTENSION:
- return 'File upload stopped by extension';
- default:
- return 'Unknown upload error';
- }
-}
\ No newline at end of file
diff --git a/caddyhttp/fastcgi/fcgiclient.go b/caddyhttp/fastcgi/fcgiclient.go
deleted file mode 100644
index 414e34f6a43..00000000000
--- a/caddyhttp/fastcgi/fcgiclient.go
+++ /dev/null
@@ -1,566 +0,0 @@
-// Forked Jan. 2015 from http://bitbucket.org/PinIdea/fcgi_client
-// (which is forked from https://code.google.com/p/go-fastcgi-client/)
-
-// This fork contains several fixes and improvements by Matt Holt and
-// other contributors to this project.
-
-// Copyright 2012 Junqing Tan and The Go Authors
-// Use of this source code is governed by a BSD-style
-// Part of source code is from Go fcgi package
-
-package fastcgi
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net"
- "net/http"
- "net/http/httputil"
- "net/textproto"
- "net/url"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// FCGIListenSockFileno describes listen socket file number.
-const FCGIListenSockFileno uint8 = 0
-
-// FCGIHeaderLen describes header length.
-const FCGIHeaderLen uint8 = 8
-
-// Version1 describes the version.
-const Version1 uint8 = 1
-
-// FCGINullRequestID describes the null request ID.
-const FCGINullRequestID uint8 = 0
-
-// FCGIKeepConn describes keep connection mode.
-const FCGIKeepConn uint8 = 1
-
-const (
- // BeginRequest is the begin request flag.
- BeginRequest uint8 = iota + 1
- // AbortRequest is the abort request flag.
- AbortRequest
- // EndRequest is the end request flag.
- EndRequest
- // Params is the parameters flag.
- Params
- // Stdin is the standard input flag.
- Stdin
- // Stdout is the standard output flag.
- Stdout
- // Stderr is the standard error flag.
- Stderr
- // Data is the data flag.
- Data
- // GetValues is the get values flag.
- GetValues
- // GetValuesResult is the get values result flag.
- GetValuesResult
- // UnknownType is the unknown type flag.
- UnknownType
- // MaxType is the maximum type flag.
- MaxType = UnknownType
-)
-
-const (
- // Responder is the responder flag.
- Responder uint8 = iota + 1
- // Authorizer is the authorizer flag.
- Authorizer
- // Filter is the filter flag.
- Filter
-)
-
-const (
- // RequestComplete is the completed request flag.
- RequestComplete uint8 = iota
- // CantMultiplexConns is the multiplexed connections flag.
- CantMultiplexConns
- // Overloaded is the overloaded flag.
- Overloaded
- // UnknownRole is the unknown role flag.
- UnknownRole
-)
-
-const (
- // MaxConns is the maximum connections flag.
- MaxConns string = "MAX_CONNS"
- // MaxRequests is the maximum requests flag.
- MaxRequests string = "MAX_REQS"
- // MultiplexConns is the multiplex connections flag.
- MultiplexConns string = "MPXS_CONNS"
-)
-
-const (
- maxWrite = 65500 // 65530 may work, but for compatibility
- maxPad = 255
-)
-
-type header struct {
- Version uint8
- Type uint8
- ID uint16
- ContentLength uint16
- PaddingLength uint8
- Reserved uint8
-}
-
-// for padding so we don't have to allocate all the time
-// not synchronized because we don't care what the contents are
-var pad [maxPad]byte
-
-func (h *header) init(recType uint8, reqID uint16, contentLength int) {
- h.Version = 1
- h.Type = recType
- h.ID = reqID
- h.ContentLength = uint16(contentLength)
- h.PaddingLength = uint8(-contentLength & 7)
-}
-
-type record struct {
- h header
- rbuf []byte
-}
-
-func (rec *record) read(r io.Reader) (buf []byte, err error) {
- if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
- return
- }
- if rec.h.Version != 1 {
- err = errors.New("fcgi: invalid header version")
- return
- }
- if rec.h.Type == EndRequest {
- err = io.EOF
- return
- }
- n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
- if len(rec.rbuf) < n {
- rec.rbuf = make([]byte, n)
- }
- if _, err = io.ReadFull(r, rec.rbuf[:n]); err != nil {
- return
- }
- buf = rec.rbuf[:int(rec.h.ContentLength)]
-
- return
-}
-
-// FCGIClient implements a FastCGI client, which is a standard for
-// interfacing external applications with Web servers.
-type FCGIClient struct {
- mutex sync.Mutex
- rwc io.ReadWriteCloser
- h header
- buf bytes.Buffer
- stderr bytes.Buffer
- keepAlive bool
- reqID uint16
- readTimeout time.Duration
- sendTimeout time.Duration
-}
-
-// DialWithDialerContext connects to the fcgi responder at the specified network address, using custom net.Dialer
-// and a context.
-// See func net.Dial for a description of the network and address parameters.
-func DialWithDialerContext(ctx context.Context, network, address string, dialer net.Dialer) (fcgi *FCGIClient, err error) {
- var conn net.Conn
- conn, err = dialer.DialContext(ctx, network, address)
- if err != nil {
- return
- }
-
- fcgi = &FCGIClient{
- rwc: conn,
- keepAlive: false,
- reqID: 1,
- }
-
- return
-}
-
-// DialContext is like Dial but passes ctx to dialer.Dial.
-func DialContext(ctx context.Context, network, address string) (fcgi *FCGIClient, err error) {
- return DialWithDialerContext(ctx, network, address, net.Dialer{})
-}
-
-// Dial connects to the fcgi responder at the specified network address, using default net.Dialer.
-// See func net.Dial for a description of the network and address parameters.
-func Dial(network, address string) (fcgi *FCGIClient, err error) {
- return DialContext(context.Background(), network, address)
-}
-
-// Close closes fcgi connnection
-func (c *FCGIClient) Close() {
- c.rwc.Close()
-}
-
-func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) {
- c.mutex.Lock()
- defer c.mutex.Unlock()
- c.buf.Reset()
- c.h.init(recType, c.reqID, len(content))
- if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
- return err
- }
- if _, err := c.buf.Write(content); err != nil {
- return err
- }
- if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
- return err
- }
- _, err = c.rwc.Write(c.buf.Bytes())
- return err
-}
-
-func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error {
- b := [8]byte{byte(role >> 8), byte(role), flags}
- return c.writeRecord(BeginRequest, b[:])
-}
-
-func (c *FCGIClient) writeEndRequest(appStatus int, protocolStatus uint8) error {
- b := make([]byte, 8)
- binary.BigEndian.PutUint32(b, uint32(appStatus))
- b[4] = protocolStatus
- return c.writeRecord(EndRequest, b)
-}
-
-func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error {
- w := newWriter(c, recType)
- b := make([]byte, 8)
- nn := 0
- for k, v := range pairs {
- m := 8 + len(k) + len(v)
- if m > maxWrite {
- // param data size exceed 65535 bytes"
- vl := maxWrite - 8 - len(k)
- v = v[:vl]
- }
- n := encodeSize(b, uint32(len(k)))
- n += encodeSize(b[n:], uint32(len(v)))
- m = n + len(k) + len(v)
- if (nn + m) > maxWrite {
- w.Flush()
- nn = 0
- }
- nn += m
- if _, err := w.Write(b[:n]); err != nil {
- return err
- }
- if _, err := w.WriteString(k); err != nil {
- return err
- }
- if _, err := w.WriteString(v); err != nil {
- return err
- }
- }
- w.Close()
- return nil
-}
-
-func encodeSize(b []byte, size uint32) int {
- if size > 127 {
- size |= 1 << 31
- binary.BigEndian.PutUint32(b, size)
- return 4
- }
- b[0] = byte(size)
- return 1
-}
-
-// bufWriter encapsulates bufio.Writer but also closes the underlying stream when
-// Closed.
-type bufWriter struct {
- closer io.Closer
- *bufio.Writer
-}
-
-func (w *bufWriter) Close() error {
- if err := w.Writer.Flush(); err != nil {
- w.closer.Close()
- return err
- }
- return w.closer.Close()
-}
-
-func newWriter(c *FCGIClient, recType uint8) *bufWriter {
- s := &streamWriter{c: c, recType: recType}
- w := bufio.NewWriterSize(s, maxWrite)
- return &bufWriter{s, w}
-}
-
-// streamWriter abstracts out the separation of a stream into discrete records.
-// It only writes maxWrite bytes at a time.
-type streamWriter struct {
- c *FCGIClient
- recType uint8
-}
-
-func (w *streamWriter) Write(p []byte) (int, error) {
- nn := 0
- for len(p) > 0 {
- n := len(p)
- if n > maxWrite {
- n = maxWrite
- }
- if err := w.c.writeRecord(w.recType, p[:n]); err != nil {
- return nn, err
- }
- nn += n
- p = p[n:]
- }
- return nn, nil
-}
-
-func (w *streamWriter) Close() error {
- // send empty record to close the stream
- return w.c.writeRecord(w.recType, nil)
-}
-
-type streamReader struct {
- c *FCGIClient
- buf []byte
-}
-
-func (w *streamReader) Read(p []byte) (n int, err error) {
-
- if len(p) > 0 {
- if len(w.buf) == 0 {
-
- // filter outputs for error log
- for {
- rec := &record{}
- var buf []byte
- buf, err = rec.read(w.c.rwc)
- if err != nil {
- return
- }
- // standard error output
- if rec.h.Type == Stderr {
- w.c.stderr.Write(buf)
- continue
- }
- w.buf = buf
- break
- }
- }
-
- n = len(p)
- if n > len(w.buf) {
- n = len(w.buf)
- }
- copy(p, w.buf[:n])
- w.buf = w.buf[n:]
- }
-
- return
-}
-
-// Do made the request and returns a io.Reader that translates the data read
-// from fcgi responder out of fcgi packet before returning it.
-func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) {
- err = c.writeBeginRequest(uint16(Responder), 0)
- if err != nil {
- return
- }
-
- err = c.writePairs(Params, p)
- if err != nil {
- return
- }
-
- body := newWriter(c, Stdin)
- if req != nil {
- io.Copy(body, req)
- }
- body.Close()
-
- r = &streamReader{c: c}
- return
-}
-
-// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer
-// that closes FCGIClient connection.
-type clientCloser struct {
- *FCGIClient
- io.Reader
-}
-
-func (f clientCloser) Close() error { return f.rwc.Close() }
-
-// Request returns a HTTP Response with Header and Body
-// from fcgi responder
-func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
- r, err := c.Do(p, req)
- if err != nil {
- return
- }
-
- rb := bufio.NewReader(r)
- tp := textproto.NewReader(rb)
- resp = new(http.Response)
-
- // Parse the response headers.
- mimeHeader, err := tp.ReadMIMEHeader()
- if err != nil && err != io.EOF {
- return
- }
- resp.Header = http.Header(mimeHeader)
-
- if resp.Header.Get("Status") != "" {
- statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2)
- resp.StatusCode, err = strconv.Atoi(statusParts[0])
- if err != nil {
- return
- }
- if len(statusParts) > 1 {
- resp.Status = statusParts[1]
- }
-
- } else {
- resp.StatusCode = http.StatusOK
- }
-
- // TODO: fixTransferEncoding ?
- resp.TransferEncoding = resp.Header["Transfer-Encoding"]
- resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
-
- if chunked(resp.TransferEncoding) {
- resp.Body = clientCloser{c, httputil.NewChunkedReader(rb)}
- } else {
- resp.Body = clientCloser{c, ioutil.NopCloser(rb)}
- }
- return
-}
-
-// Get issues a GET request to the fcgi responder.
-func (c *FCGIClient) Get(p map[string]string) (resp *http.Response, err error) {
-
- p["REQUEST_METHOD"] = "GET"
- p["CONTENT_LENGTH"] = "0"
-
- return c.Request(p, nil)
-}
-
-// Head issues a HEAD request to the fcgi responder.
-func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) {
-
- p["REQUEST_METHOD"] = "HEAD"
- p["CONTENT_LENGTH"] = "0"
-
- return c.Request(p, nil)
-}
-
-// Options issues an OPTIONS request to the fcgi responder.
-func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) {
-
- p["REQUEST_METHOD"] = "OPTIONS"
- p["CONTENT_LENGTH"] = "0"
-
- return c.Request(p, nil)
-}
-
-// Post issues a POST request to the fcgi responder. with request body
-// in the format that bodyType specified
-func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) {
- if p == nil {
- p = make(map[string]string)
- }
-
- p["REQUEST_METHOD"] = strings.ToUpper(method)
-
- if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" {
- p["REQUEST_METHOD"] = "POST"
- }
-
- p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10)
- if len(bodyType) > 0 {
- p["CONTENT_TYPE"] = bodyType
- } else {
- p["CONTENT_TYPE"] = "application/x-www-form-urlencoded"
- }
-
- return c.Request(p, body)
-}
-
-// PostForm issues a POST to the fcgi responder, with form
-// as a string key to a list values (url.Values)
-func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) {
- body := bytes.NewReader([]byte(data.Encode()))
- return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len()))
-}
-
-// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard,
-// with form as a string key to a list values (url.Values),
-// and/or with file as a string key to a list file path.
-func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) {
- buf := &bytes.Buffer{}
- writer := multipart.NewWriter(buf)
- bodyType := writer.FormDataContentType()
-
- for key, val := range data {
- for _, v0 := range val {
- err = writer.WriteField(key, v0)
- if err != nil {
- return
- }
- }
- }
-
- for key, val := range file {
- fd, e := os.Open(val)
- if e != nil {
- return nil, e
- }
- defer fd.Close()
-
- part, e := writer.CreateFormFile(key, filepath.Base(val))
- if e != nil {
- return nil, e
- }
- _, err = io.Copy(part, fd)
- if err != nil {
- return
- }
- }
-
- err = writer.Close()
- if err != nil {
- return
- }
-
- return c.Post(p, "POST", bodyType, buf, int64(buf.Len()))
-}
-
-// SetReadTimeout sets the read timeout for future calls that read from the
-// fcgi responder. A zero value for t means no timeout will be set.
-func (c *FCGIClient) SetReadTimeout(t time.Duration) error {
- if conn, ok := c.rwc.(net.Conn); ok && t != 0 {
- return conn.SetReadDeadline(time.Now().Add(t))
- }
- return nil
-}
-
-// SetSendTimeout sets the read timeout for future calls that send data to
-// the fcgi responder. A zero value for t means no timeout will be set.
-func (c *FCGIClient) SetSendTimeout(t time.Duration) error {
- if conn, ok := c.rwc.(net.Conn); ok && t != 0 {
- return conn.SetWriteDeadline(time.Now().Add(t))
- }
- return nil
-}
-
-// Checks whether chunked is part of the encodings stack
-func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
diff --git a/caddyhttp/fastcgi/fcgiclient_test.go b/caddyhttp/fastcgi/fcgiclient_test.go
deleted file mode 100644
index ce897b10abe..00000000000
--- a/caddyhttp/fastcgi/fcgiclient_test.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// NOTE: These tests were adapted from the original
-// repository from which this package was forked.
-// The tests are slow (~10s) and in dire need of rewriting.
-// As such, the tests have been disabled to speed up
-// automated builds until they can be properly written.
-
-package fastcgi
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math/rand"
- "net"
- "net/http"
- "net/http/fcgi"
- "net/url"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
-)
-
-// test fcgi protocol includes:
-// Get, Post, Post in multipart/form-data, and Post with files
-// each key should be the md5 of the value or the file uploaded
-// sepicify remote fcgi responer ip:port to test with php
-// test failed if the remote fcgi(script) failed md5 verification
-// and output "FAILED" in response
-const (
- scriptFile = "/tank/www/fcgic_test.php"
- //ipPort = "remote-php-serv:59000"
- ipPort = "127.0.0.1:59000"
-)
-
-var globalt *testing.T
-
-type FastCGIServer struct{}
-
-func (s FastCGIServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-
- req.ParseMultipartForm(100000000)
-
- stat := "PASSED"
- fmt.Fprintln(resp, "-")
- fileNum := 0
- {
- length := 0
- for k0, v0 := range req.Form {
- h := md5.New()
- io.WriteString(h, v0[0])
- md5 := fmt.Sprintf("%x", h.Sum(nil))
-
- length += len(k0)
- length += len(v0[0])
-
- // echo error when key != md5(val)
- if md5 != k0 {
- fmt.Fprintln(resp, "server:err ", md5, k0)
- stat = "FAILED"
- }
- }
- if req.MultipartForm != nil {
- fileNum = len(req.MultipartForm.File)
- for kn, fns := range req.MultipartForm.File {
- //fmt.Fprintln(resp, "server:filekey ", kn )
- length += len(kn)
- for _, f := range fns {
- fd, err := f.Open()
- if err != nil {
- log.Println("server:", err)
- return
- }
- h := md5.New()
- l0, err := io.Copy(h, fd)
- if err != nil {
- log.Println(err)
- return
- }
- length += int(l0)
- defer fd.Close()
- md5 := fmt.Sprintf("%x", h.Sum(nil))
- //fmt.Fprintln(resp, "server:filemd5 ", md5 )
-
- if kn != md5 {
- fmt.Fprintln(resp, "server:err ", md5, kn)
- stat = "FAILED"
- }
- //fmt.Fprintln(resp, "server:filename ", f.Filename )
- }
- }
- }
-
- fmt.Fprintln(resp, "server:got data length", length)
- }
- fmt.Fprintln(resp, "-"+stat+"-POST(", len(req.Form), ")-FILE(", fileNum, ")--")
-}
-
-func sendFcgi(reqType int, fcgiParams map[string]string, data []byte, posts map[string]string, files map[string]string) (content []byte) {
- fcgi, err := Dial("tcp", ipPort)
- if err != nil {
- log.Println("err:", err)
- return
- }
-
- length := 0
-
- var resp *http.Response
- switch reqType {
- case 0:
- if len(data) > 0 {
- length = len(data)
- rd := bytes.NewReader(data)
- resp, err = fcgi.Post(fcgiParams, "", "", rd, int64(rd.Len()))
- } else if len(posts) > 0 {
- values := url.Values{}
- for k, v := range posts {
- values.Set(k, v)
- length += len(k) + 2 + len(v)
- }
- resp, err = fcgi.PostForm(fcgiParams, values)
- } else {
- resp, err = fcgi.Get(fcgiParams)
- }
-
- default:
- values := url.Values{}
- for k, v := range posts {
- values.Set(k, v)
- length += len(k) + 2 + len(v)
- }
-
- for k, v := range files {
- fi, _ := os.Lstat(v)
- length += len(k) + int(fi.Size())
- }
- resp, err = fcgi.PostFile(fcgiParams, values, files)
- }
-
- if err != nil {
- log.Println("err:", err)
- return
- }
-
- defer resp.Body.Close()
- content, _ = ioutil.ReadAll(resp.Body)
-
- log.Println("c: send data length ≈", length, string(content))
- fcgi.Close()
- time.Sleep(1 * time.Second)
-
- if bytes.Index(content, []byte("FAILED")) >= 0 {
- globalt.Error("Server return failed message")
- }
-
- return
-}
-
-func generateRandFile(size int) (p string, m string) {
-
- p = filepath.Join(os.TempDir(), "fcgict"+strconv.Itoa(rand.Int()))
-
- // open output file
- fo, err := os.Create(p)
- if err != nil {
- panic(err)
- }
- // close fo on exit and check for its returned error
- defer func() {
- if err := fo.Close(); err != nil {
- panic(err)
- }
- }()
-
- h := md5.New()
- for i := 0; i < size/16; i++ {
- buf := make([]byte, 16)
- binary.PutVarint(buf, rand.Int63())
- fo.Write(buf)
- h.Write(buf)
- }
- m = fmt.Sprintf("%x", h.Sum(nil))
- return
-}
-
-func DisabledTest(t *testing.T) {
- // TODO: test chunked reader
- globalt = t
-
- rand.Seed(time.Now().UTC().UnixNano())
-
- // server
- go func() {
- listener, err := net.Listen("tcp", ipPort)
- if err != nil {
- // handle error
- log.Println("listener creation failed: ", err)
- }
-
- srv := new(FastCGIServer)
- fcgi.Serve(listener, srv)
- }()
-
- time.Sleep(1 * time.Second)
-
- // init
- fcgiParams := make(map[string]string)
- fcgiParams["REQUEST_METHOD"] = "GET"
- fcgiParams["SERVER_PROTOCOL"] = "HTTP/1.1"
- //fcgi_params["GATEWAY_INTERFACE"] = "CGI/1.1"
- fcgiParams["SCRIPT_FILENAME"] = scriptFile
-
- // simple GET
- log.Println("test:", "get")
- sendFcgi(0, fcgiParams, nil, nil, nil)
-
- // simple post data
- log.Println("test:", "post")
- sendFcgi(0, fcgiParams, []byte("c4ca4238a0b923820dcc509a6f75849b=1&7b8b965ad4bca0e41ab51de7b31363a1=n"), nil, nil)
-
- log.Println("test:", "post data (more than 60KB)")
- data := ""
- for i := 0x00; i < 0xff; i++ {
- v0 := strings.Repeat(string(i), 256)
- h := md5.New()
- io.WriteString(h, v0)
- k0 := fmt.Sprintf("%x", h.Sum(nil))
- data += k0 + "=" + url.QueryEscape(v0) + "&"
- }
- sendFcgi(0, fcgiParams, []byte(data), nil, nil)
-
- log.Println("test:", "post form (use url.Values)")
- p0 := make(map[string]string, 1)
- p0["c4ca4238a0b923820dcc509a6f75849b"] = "1"
- p0["7b8b965ad4bca0e41ab51de7b31363a1"] = "n"
- sendFcgi(1, fcgiParams, nil, p0, nil)
-
- log.Println("test:", "post forms (256 keys, more than 1MB)")
- p1 := make(map[string]string, 1)
- for i := 0x00; i < 0xff; i++ {
- v0 := strings.Repeat(string(i), 4096)
- h := md5.New()
- io.WriteString(h, v0)
- k0 := fmt.Sprintf("%x", h.Sum(nil))
- p1[k0] = v0
- }
- sendFcgi(1, fcgiParams, nil, p1, nil)
-
- log.Println("test:", "post file (1 file, 500KB)) ")
- f0 := make(map[string]string, 1)
- path0, m0 := generateRandFile(500000)
- f0[m0] = path0
- sendFcgi(1, fcgiParams, nil, p1, f0)
-
- log.Println("test:", "post multiple files (2 files, 5M each) and forms (256 keys, more than 1MB data")
- path1, m1 := generateRandFile(5000000)
- f0[m1] = path1
- sendFcgi(1, fcgiParams, nil, p1, f0)
-
- log.Println("test:", "post only files (2 files, 5M each)")
- sendFcgi(1, fcgiParams, nil, nil, f0)
-
- log.Println("test:", "post only 1 file")
- delete(f0, "m0")
- sendFcgi(1, fcgiParams, nil, nil, f0)
-
- os.Remove(path0)
- os.Remove(path1)
-}
diff --git a/caddyhttp/fastcgi/setup.go b/caddyhttp/fastcgi/setup.go
deleted file mode 100644
index 2b0fef8c913..00000000000
--- a/caddyhttp/fastcgi/setup.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package fastcgi
-
-import (
- "errors"
- "net/http"
- "path/filepath"
- "time"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("fastcgi", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new FastCGI middleware instance.
-func setup(c *caddy.Controller) error {
- cfg := httpserver.GetConfig(c)
-
- rules, err := fastcgiParse(c)
- if err != nil {
- return err
- }
-
- cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Handler{
- Next: next,
- Rules: rules,
- Root: cfg.Root,
- FileSys: http.Dir(cfg.Root),
- SoftwareName: caddy.AppName,
- SoftwareVersion: caddy.AppVersion,
- ServerName: cfg.Addr.Host,
- ServerPort: cfg.Addr.Port,
- }
- })
-
- return nil
-}
-
-func fastcgiParse(c *caddy.Controller) ([]Rule, error) {
- var rules []Rule
-
- cfg := httpserver.GetConfig(c)
- absRoot, err := filepath.Abs(cfg.Root)
- if err != nil {
- return nil, err
- }
-
- for c.Next() {
- args := c.RemainingArgs()
-
- if len(args) < 2 || len(args) > 3 {
- return rules, c.ArgErr()
- }
-
- rule := Rule{
- Root: absRoot,
- Path: args[0],
- }
- upstreams := []string{args[1]}
-
- if len(args) == 3 {
- if err := fastcgiPreset(args[2], &rule); err != nil {
- return rules, err
- }
- }
-
- var err error
-
- for c.NextBlock() {
- switch c.Val() {
- case "root":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- rule.Root = c.Val()
-
- case "ext":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- rule.Ext = c.Val()
- case "split":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- rule.SplitPath = c.Val()
- case "index":
- args := c.RemainingArgs()
- if len(args) == 0 {
- return rules, c.ArgErr()
- }
- rule.IndexFiles = args
-
- case "upstream":
- args := c.RemainingArgs()
-
- if len(args) != 1 {
- return rules, c.ArgErr()
- }
-
- upstreams = append(upstreams, args[0])
- case "env":
- envArgs := c.RemainingArgs()
- if len(envArgs) < 2 {
- return rules, c.ArgErr()
- }
- rule.EnvVars = append(rule.EnvVars, [2]string{envArgs[0], envArgs[1]})
- case "except":
- ignoredPaths := c.RemainingArgs()
- if len(ignoredPaths) == 0 {
- return rules, c.ArgErr()
- }
- rule.IgnoredSubPaths = ignoredPaths
-
- case "connect_timeout":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- rule.ConnectTimeout, err = time.ParseDuration(c.Val())
- if err != nil {
- return rules, err
- }
- case "read_timeout":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- readTimeout, err := time.ParseDuration(c.Val())
- if err != nil {
- return rules, err
- }
- rule.ReadTimeout = readTimeout
- case "send_timeout":
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- sendTimeout, err := time.ParseDuration(c.Val())
- if err != nil {
- return rules, err
- }
- rule.SendTimeout = sendTimeout
- }
- }
-
- rule.balancer = &roundRobin{addresses: upstreams, index: -1}
-
- rules = append(rules, rule)
- }
- return rules, nil
-}
-
-// fastcgiPreset configures rule according to name. It returns an error if
-// name is not a recognized preset name.
-func fastcgiPreset(name string, rule *Rule) error {
- switch name {
- case "php":
- rule.Ext = ".php"
- rule.SplitPath = ".php"
- rule.IndexFiles = []string{"index.php"}
- default:
- return errors.New(name + " is not a valid preset name")
- }
- return nil
-}
diff --git a/caddyhttp/fastcgi/setup_test.go b/caddyhttp/fastcgi/setup_test.go
deleted file mode 100644
index 88ba9ed46d7..00000000000
--- a/caddyhttp/fastcgi/setup_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package fastcgi
-
-import (
- "fmt"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `fastcgi / 127.0.0.1:9000`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Handler)
-
- if !ok {
- t.Fatalf("Expected handler to be type , got: %#v", handler)
- }
-
- if myHandler.Rules[0].Path != "/" {
- t.Errorf("Expected / as the Path")
- }
- if myHandler.Rules[0].Address() != "127.0.0.1:9000" {
- t.Errorf("Expected 127.0.0.1:9000 as the Address")
- }
-
-}
-
-func TestFastcgiParse(t *testing.T) {
- tests := []struct {
- inputFastcgiConfig string
- shouldErr bool
- expectedFastcgiConfig []Rule
- }{
-
- {`fastcgi /blog 127.0.0.1:9000 php`,
- false, []Rule{{
- Path: "/blog",
- balancer: &roundRobin{addresses: []string{"127.0.0.1:9000"}},
- Ext: ".php",
- SplitPath: ".php",
- IndexFiles: []string{"index.php"},
- }}},
- {`fastcgi / 127.0.0.1:9001 {
- split .html
- }`,
- false, []Rule{{
- Path: "/",
- balancer: &roundRobin{addresses: []string{"127.0.0.1:9001"}},
- Ext: "",
- SplitPath: ".html",
- IndexFiles: []string{},
- }}},
- {`fastcgi / 127.0.0.1:9001 {
- split .html
- except /admin /user
- }`,
- false, []Rule{{
- Path: "/",
- balancer: &roundRobin{addresses: []string{"127.0.0.1:9001"}},
- Ext: "",
- SplitPath: ".html",
- IndexFiles: []string{},
- IgnoredSubPaths: []string{"/admin", "/user"},
- }}},
- }
- for i, test := range tests {
- actualFastcgiConfigs, err := fastcgiParse(caddy.NewTestController("http", test.inputFastcgiConfig))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
- if len(actualFastcgiConfigs) != len(test.expectedFastcgiConfig) {
- t.Fatalf("Test %d expected %d no of FastCGI configs, but got %d ",
- i, len(test.expectedFastcgiConfig), len(actualFastcgiConfigs))
- }
- for j, actualFastcgiConfig := range actualFastcgiConfigs {
-
- if actualFastcgiConfig.Path != test.expectedFastcgiConfig[j].Path {
- t.Errorf("Test %d expected %dth FastCGI Path to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].Path, actualFastcgiConfig.Path)
- }
-
- if actualFastcgiConfig.Address() != test.expectedFastcgiConfig[j].Address() {
- t.Errorf("Test %d expected %dth FastCGI Address to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].Address(), actualFastcgiConfig.Address())
- }
-
- if actualFastcgiConfig.Ext != test.expectedFastcgiConfig[j].Ext {
- t.Errorf("Test %d expected %dth FastCGI Ext to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].Ext, actualFastcgiConfig.Ext)
- }
-
- if actualFastcgiConfig.SplitPath != test.expectedFastcgiConfig[j].SplitPath {
- t.Errorf("Test %d expected %dth FastCGI SplitPath to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].SplitPath, actualFastcgiConfig.SplitPath)
- }
-
- if fmt.Sprint(actualFastcgiConfig.IndexFiles) != fmt.Sprint(test.expectedFastcgiConfig[j].IndexFiles) {
- t.Errorf("Test %d expected %dth FastCGI IndexFiles to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].IndexFiles, actualFastcgiConfig.IndexFiles)
- }
-
- if fmt.Sprint(actualFastcgiConfig.IgnoredSubPaths) != fmt.Sprint(test.expectedFastcgiConfig[j].IgnoredSubPaths) {
- t.Errorf("Test %d expected %dth FastCGI IgnoredSubPaths to be %s , but got %s",
- i, j, test.expectedFastcgiConfig[j].IgnoredSubPaths, actualFastcgiConfig.IgnoredSubPaths)
- }
- }
- }
-
-}
diff --git a/caddyhttp/gzip/gzip.go b/caddyhttp/gzip/gzip.go
deleted file mode 100644
index bd8692cdcb1..00000000000
--- a/caddyhttp/gzip/gzip.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Package gzip provides a middleware layer that performs
-// gzip compression on the response.
-package gzip
-
-import (
- "io"
- "net/http"
- "strings"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("gzip", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-
- initWriterPool()
-}
-
-// Gzip is a middleware type which gzips HTTP responses. It is
-// imperative that any handler which writes to a gzipped response
-// specifies the Content-Type, otherwise some clients will assume
-// application/x-gzip and try to download a file.
-type Gzip struct {
- Next httpserver.Handler
- Configs []Config
-}
-
-// Config holds the configuration for Gzip middleware
-type Config struct {
- RequestFilters []RequestFilter
- ResponseFilters []ResponseFilter
- Level int // Compression level
-}
-
-// ServeHTTP serves a gzipped response if the client supports it.
-func (g Gzip) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
- return g.Next.ServeHTTP(w, r)
- }
-outer:
- for _, c := range g.Configs {
-
- // Check request filters to determine if gzipping is permitted for this request
- for _, filter := range c.RequestFilters {
- if !filter.ShouldCompress(r) {
- continue outer
- }
- }
-
- // gzipWriter modifies underlying writer at init,
- // use a discard writer instead to leave ResponseWriter in
- // original form.
- gzipWriter := getWriter(c.Level)
- defer putWriter(c.Level, gzipWriter)
- gz := &gzipResponseWriter{
- Writer: gzipWriter,
- ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w},
- }
-
- var rw http.ResponseWriter
- // if no response filter is used
- if len(c.ResponseFilters) == 0 {
- // replace discard writer with ResponseWriter
- gzipWriter.Reset(w)
- rw = gz
- } else {
- // wrap gzip writer with ResponseFilterWriter
- rw = NewResponseFilterWriter(c.ResponseFilters, gz)
- }
-
- // Any response in forward middleware will now be compressed
- status, err := g.Next.ServeHTTP(rw, r)
-
- // If there was an error that remained unhandled, we need
- // to send something back before gzipWriter gets closed at
- // the return of this method!
- if status >= 400 {
- httpserver.DefaultErrorFunc(w, r, status)
- return 0, err
- }
- return status, err
- }
-
- // no matching filter
- return g.Next.ServeHTTP(w, r)
-}
-
-// gzipResponeWriter wraps the underlying Write method
-// with a gzip.Writer to compress the output.
-type gzipResponseWriter struct {
- io.Writer
- *httpserver.ResponseWriterWrapper
- statusCodeWritten bool
-}
-
-// WriteHeader wraps the underlying WriteHeader method to prevent
-// problems with conflicting headers from proxied backends. For
-// example, a backend system that calculates Content-Length would
-// be wrong because it doesn't know it's being gzipped.
-func (w *gzipResponseWriter) WriteHeader(code int) {
- w.Header().Del("Content-Length")
- w.Header().Set("Content-Encoding", "gzip")
- w.Header().Add("Vary", "Accept-Encoding")
- originalEtag := w.Header().Get("ETag")
- if originalEtag != "" && !strings.HasPrefix(originalEtag, "W/") {
- w.Header().Set("ETag", "W/"+originalEtag)
- }
- w.ResponseWriterWrapper.WriteHeader(code)
- w.statusCodeWritten = true
-}
-
-// Write wraps the underlying Write method to do compression.
-func (w *gzipResponseWriter) Write(b []byte) (int, error) {
- if w.Header().Get("Content-Type") == "" {
- w.Header().Set("Content-Type", http.DetectContentType(b))
- }
- if !w.statusCodeWritten {
- w.WriteHeader(http.StatusOK)
- }
- n, err := w.Writer.Write(b)
- return n, err
-}
-
-// Interface guards
-var _ httpserver.HTTPInterfaces = (*gzipResponseWriter)(nil)
diff --git a/caddyhttp/gzip/gzip_test.go b/caddyhttp/gzip/gzip_test.go
deleted file mode 100644
index 494dec62a25..00000000000
--- a/caddyhttp/gzip/gzip_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package gzip
-
-import (
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestGzipHandler(t *testing.T) {
- pathFilter := PathFilter{make(Set)}
- badPaths := []string{"/bad", "/nogzip", "/nongzip"}
- for _, p := range badPaths {
- pathFilter.IgnoredPaths.Add(p)
- }
- extFilter := ExtFilter{make(Set)}
- for _, e := range []string{".txt", ".html", ".css", ".md"} {
- extFilter.Exts.Add(e)
- }
- gz := Gzip{Configs: []Config{
- {RequestFilters: []RequestFilter{pathFilter, extFilter}},
- }}
-
- w := httptest.NewRecorder()
- gz.Next = nextFunc(true)
- var exts = []string{
- ".html", ".css", ".md",
- }
- for _, e := range exts {
- url := "/file" + e
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- t.Error(err)
- }
- r.Header.Set("Accept-Encoding", "gzip")
- w.Header().Set("ETag", `"2n9cd"`)
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
-
- // The second pass, test if the ETag is already weak
- w.Header().Set("ETag", `W/"2n9cd"`)
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
-
- w = httptest.NewRecorder()
- gz.Next = nextFunc(false)
- for _, p := range badPaths {
- for _, e := range exts {
- url := p + "/file" + e
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- t.Error(err)
- }
- r.Header.Set("Accept-Encoding", "gzip")
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
- }
-
- w = httptest.NewRecorder()
- gz.Next = nextFunc(false)
- exts = []string{
- ".htm1", ".abc", ".mdx",
- }
- for _, e := range exts {
- url := "/file" + e
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- t.Error(err)
- }
- r.Header.Set("Accept-Encoding", "gzip")
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
-
- // test all levels
- w = httptest.NewRecorder()
- gz.Next = nextFunc(true)
- for i := 0; i <= gzip.BestCompression; i++ {
- gz.Configs[0].Level = i
- r, err := http.NewRequest("GET", "/file.txt", nil)
- if err != nil {
- t.Error(err)
- }
- r.Header.Set("Accept-Encoding", "gzip")
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
-}
-
-func nextFunc(shouldGzip bool) httpserver.Handler {
- return httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- // write a relatively large text file
- b, err := ioutil.ReadFile("testdata/test.txt")
- if err != nil {
- return 500, err
- }
- if _, err := w.Write(b); err != nil {
- return 500, err
- }
-
- if shouldGzip {
- if w.Header().Get("Content-Encoding") != "gzip" {
- return 0, fmt.Errorf("Content-Encoding must be gzip, found %v", w.Header().Get("Content-Encoding"))
- }
- if w.Header().Get("Vary") != "Accept-Encoding" {
- return 0, fmt.Errorf("Vary must be Accept-Encoding, found %v", w.Header().Get("Vary"))
- }
- etag := w.Header().Get("ETag")
- if etag != "" && etag != `W/"2n9cd"` {
- return 0, fmt.Errorf("ETag must be converted to weak Etag, found %v", w.Header().Get("ETag"))
- }
- if _, ok := w.(*gzipResponseWriter); !ok {
- return 0, fmt.Errorf("ResponseWriter should be gzipResponseWriter, found %T", w)
- }
- if strings.Contains(w.Header().Get("Content-Type"), "application/x-gzip") {
- return 0, fmt.Errorf("Content-Type should not be gzip")
- }
- return 0, nil
- }
- if r.Header.Get("Accept-Encoding") == "" {
- return 0, fmt.Errorf("Accept-Encoding header expected")
- }
- if w.Header().Get("Content-Encoding") == "gzip" {
- return 0, fmt.Errorf("Content-Encoding must not be gzip, found gzip")
- }
- if _, ok := w.(*gzipResponseWriter); ok {
- return 0, fmt.Errorf("ResponseWriter should not be gzipResponseWriter")
- }
- return 0, nil
- })
-}
-
-func BenchmarkGzip(b *testing.B) {
- pathFilter := PathFilter{make(Set)}
- badPaths := []string{"/bad", "/nogzip", "/nongzip"}
- for _, p := range badPaths {
- pathFilter.IgnoredPaths.Add(p)
- }
- extFilter := ExtFilter{make(Set)}
- for _, e := range []string{".txt", ".html", ".css", ".md"} {
- extFilter.Exts.Add(e)
- }
- gz := Gzip{Configs: []Config{
- {
- RequestFilters: []RequestFilter{pathFilter, extFilter},
- },
- }}
-
- w := httptest.NewRecorder()
- gz.Next = nextFunc(true)
- url := "/file.txt"
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- b.Fatal(err)
- }
- r.Header.Set("Accept-Encoding", "gzip")
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err = gz.ServeHTTP(w, r)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/caddyhttp/gzip/requestfilter.go b/caddyhttp/gzip/requestfilter.go
deleted file mode 100644
index 804232a9d2a..00000000000
--- a/caddyhttp/gzip/requestfilter.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package gzip
-
-import (
- "net/http"
- "path"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// RequestFilter determines if a request should be gzipped.
-type RequestFilter interface {
- // ShouldCompress tells if gzip compression
- // should be done on the request.
- ShouldCompress(*http.Request) bool
-}
-
-// defaultExtensions is the list of default extensions for which to enable gzipping.
-var defaultExtensions = []string{"", ".txt", ".htm", ".html", ".css", ".php", ".js", ".json",
- ".md", ".mdown", ".xml", ".svg", ".go", ".cgi", ".py", ".pl", ".aspx", ".asp"}
-
-// DefaultExtFilter creates an ExtFilter with default extensions.
-func DefaultExtFilter() ExtFilter {
- m := ExtFilter{Exts: make(Set)}
- for _, extension := range defaultExtensions {
- m.Exts.Add(extension)
- }
- return m
-}
-
-// ExtFilter is RequestFilter for file name extensions.
-type ExtFilter struct {
- // Exts is the file name extensions to accept
- Exts Set
-}
-
-// ExtWildCard is the wildcard for extensions.
-const ExtWildCard = "*"
-
-// ShouldCompress checks if the request file extension matches any
-// of the registered extensions. It returns true if the extension is
-// found and false otherwise.
-func (e ExtFilter) ShouldCompress(r *http.Request) bool {
- ext := path.Ext(r.URL.Path)
- return e.Exts.Contains(ExtWildCard) || e.Exts.Contains(ext)
-}
-
-// PathFilter is RequestFilter for request path.
-type PathFilter struct {
- // IgnoredPaths is the paths to ignore
- IgnoredPaths Set
-}
-
-// ShouldCompress checks if the request path matches any of the
-// registered paths to ignore. It returns false if an ignored path
-// is found and true otherwise.
-func (p PathFilter) ShouldCompress(r *http.Request) bool {
- return !p.IgnoredPaths.ContainsFunc(func(value string) bool {
- return httpserver.Path(r.URL.Path).Matches(value)
- })
-}
-
-// Set stores distinct strings.
-type Set map[string]struct{}
-
-// Add adds an element to the set.
-func (s Set) Add(value string) {
- s[value] = struct{}{}
-}
-
-// Remove removes an element from the set.
-func (s Set) Remove(value string) {
- delete(s, value)
-}
-
-// Contains check if the set contains value.
-func (s Set) Contains(value string) bool {
- _, ok := s[value]
- return ok
-}
-
-// ContainsFunc is similar to Contains. It iterates all the
-// elements in the set and passes each to f. It returns true
-// on the first call to f that returns true and false otherwise.
-func (s Set) ContainsFunc(f func(string) bool) bool {
- for k := range s {
- if f(k) {
- return true
- }
- }
- return false
-}
diff --git a/caddyhttp/gzip/requestfilter_test.go b/caddyhttp/gzip/requestfilter_test.go
deleted file mode 100644
index ce31d7faf8c..00000000000
--- a/caddyhttp/gzip/requestfilter_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package gzip
-
-import (
- "net/http"
- "testing"
-)
-
-func TestSet(t *testing.T) {
- set := make(Set)
- set.Add("a")
- if len(set) != 1 {
- t.Errorf("Expected 1 found %v", len(set))
- }
- set.Add("a")
- if len(set) != 1 {
- t.Errorf("Expected 1 found %v", len(set))
- }
- set.Add("b")
- if len(set) != 2 {
- t.Errorf("Expected 2 found %v", len(set))
- }
- if !set.Contains("a") {
- t.Errorf("Set should contain a")
- }
- if !set.Contains("b") {
- t.Errorf("Set should contain a")
- }
- set.Add("c")
- if len(set) != 3 {
- t.Errorf("Expected 3 found %v", len(set))
- }
- if !set.Contains("c") {
- t.Errorf("Set should contain c")
- }
- set.Remove("a")
- if len(set) != 2 {
- t.Errorf("Expected 2 found %v", len(set))
- }
- if set.Contains("a") {
- t.Errorf("Set should not contain a")
- }
- if !set.ContainsFunc(func(v string) bool {
- return v == "c"
- }) {
- t.Errorf("ContainsFunc should return true")
- }
-}
-
-func TestExtFilter(t *testing.T) {
- var filter RequestFilter = ExtFilter{make(Set)}
- for _, e := range []string{".txt", ".html", ".css", ".md"} {
- filter.(ExtFilter).Exts.Add(e)
- }
- r := urlRequest("file.txt")
- if !filter.ShouldCompress(r) {
- t.Errorf("Should be valid filter")
- }
- var exts = []string{
- ".html", ".css", ".md",
- }
- for i, e := range exts {
- r := urlRequest("file" + e)
- if !filter.ShouldCompress(r) {
- t.Errorf("Test %v: Should be valid filter", i)
- }
- }
- exts = []string{
- ".htm1", ".abc", ".mdx",
- }
- for i, e := range exts {
- r := urlRequest("file" + e)
- if filter.ShouldCompress(r) {
- t.Errorf("Test %v: Should not be valid filter", i)
- }
- }
- filter.(ExtFilter).Exts.Add(ExtWildCard)
- for i, e := range exts {
- r := urlRequest("file" + e)
- if !filter.ShouldCompress(r) {
- t.Errorf("Test %v: Should be valid filter. Wildcard used.", i)
- }
- }
-}
-
-func TestPathFilter(t *testing.T) {
- paths := []string{
- "/a", "/b", "/c", "/de",
- }
- var filter RequestFilter = PathFilter{make(Set)}
- for _, p := range paths {
- filter.(PathFilter).IgnoredPaths.Add(p)
- }
- for i, p := range paths {
- r := urlRequest(p)
- if filter.ShouldCompress(r) {
- t.Errorf("Test %v: Should not be valid filter", i)
- }
- }
- paths = []string{
- "/f", "/g", "/h", "/ed",
- }
- for i, p := range paths {
- r := urlRequest(p)
- if !filter.ShouldCompress(r) {
- t.Errorf("Test %v: Should be valid filter", i)
- }
- }
-}
-
-func urlRequest(url string) *http.Request {
- r, _ := http.NewRequest("GET", url, nil)
- return r
-}
diff --git a/caddyhttp/gzip/responsefilter.go b/caddyhttp/gzip/responsefilter.go
deleted file mode 100644
index b623505111c..00000000000
--- a/caddyhttp/gzip/responsefilter.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package gzip
-
-import (
- "compress/gzip"
- "net/http"
- "strconv"
-)
-
-// ResponseFilter determines if the response should be gzipped.
-type ResponseFilter interface {
- ShouldCompress(http.ResponseWriter) bool
-}
-
-// LengthFilter is ResponseFilter for minimum content length.
-type LengthFilter int64
-
-// ShouldCompress returns if content length is greater than or
-// equals to minimum length.
-func (l LengthFilter) ShouldCompress(w http.ResponseWriter) bool {
- contentLength := w.Header().Get("Content-Length")
- length, err := strconv.ParseInt(contentLength, 10, 64)
- if err != nil || length == 0 {
- return false
- }
- return l != 0 && int64(l) <= length
-}
-
-// SkipCompressedFilter is ResponseFilter that will discard already compressed responses
-type SkipCompressedFilter struct{}
-
-// ShouldCompress returns true if served file is not already compressed
-// encodings via https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
-func (n SkipCompressedFilter) ShouldCompress(w http.ResponseWriter) bool {
- switch w.Header().Get("Content-Encoding") {
- case "gzip", "compress", "deflate", "br":
- return false
- default:
- return true
- }
-}
-
-// ResponseFilterWriter validates ResponseFilters. It writes
-// gzip compressed data if ResponseFilters are satisfied or
-// uncompressed data otherwise.
-type ResponseFilterWriter struct {
- filters []ResponseFilter
- shouldCompress bool
- statusCodeWritten bool
- *gzipResponseWriter
-}
-
-// NewResponseFilterWriter creates and initializes a new ResponseFilterWriter.
-func NewResponseFilterWriter(filters []ResponseFilter, gz *gzipResponseWriter) *ResponseFilterWriter {
- return &ResponseFilterWriter{filters: filters, gzipResponseWriter: gz}
-}
-
-// WriteHeader wraps underlying WriteHeader method and
-// compresses if filters are satisfied.
-func (r *ResponseFilterWriter) WriteHeader(code int) {
- // Determine if compression should be used or not.
- r.shouldCompress = true
- for _, filter := range r.filters {
- if !filter.ShouldCompress(r) {
- r.shouldCompress = false
- break
- }
- }
-
- if r.shouldCompress {
- // replace discard writer with ResponseWriter
- if gzWriter, ok := r.gzipResponseWriter.Writer.(*gzip.Writer); ok {
- gzWriter.Reset(r.ResponseWriter)
- }
- // use gzip WriteHeader to include and delete
- // necessary headers
- r.gzipResponseWriter.WriteHeader(code)
- } else {
- r.ResponseWriter.WriteHeader(code)
- }
- r.statusCodeWritten = true
-}
-
-// Write wraps underlying Write method and compresses if filters
-// are satisfied
-func (r *ResponseFilterWriter) Write(b []byte) (int, error) {
- if !r.statusCodeWritten {
- r.WriteHeader(http.StatusOK)
- }
- if r.shouldCompress {
- return r.gzipResponseWriter.Write(b)
- }
- return r.ResponseWriter.Write(b)
-}
diff --git a/caddyhttp/gzip/responsefilter_test.go b/caddyhttp/gzip/responsefilter_test.go
deleted file mode 100644
index 43a51bd1cdf..00000000000
--- a/caddyhttp/gzip/responsefilter_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package gzip
-
-import (
- "compress/gzip"
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestLengthFilter(t *testing.T) {
- var filters = []ResponseFilter{
- LengthFilter(100),
- LengthFilter(1000),
- LengthFilter(0),
- }
-
- var tests = []struct {
- length int64
- shouldCompress [3]bool
- }{
- {20, [3]bool{false, false, false}},
- {50, [3]bool{false, false, false}},
- {100, [3]bool{true, false, false}},
- {500, [3]bool{true, false, false}},
- {1000, [3]bool{true, true, false}},
- {1500, [3]bool{true, true, false}},
- }
-
- for i, ts := range tests {
- for j, filter := range filters {
- r := httptest.NewRecorder()
- r.Header().Set("Content-Length", fmt.Sprint(ts.length))
- wWriter := NewResponseFilterWriter([]ResponseFilter{filter}, &gzipResponseWriter{gzip.NewWriter(r), &httpserver.ResponseWriterWrapper{ResponseWriter: r}, false})
- if filter.ShouldCompress(wWriter) != ts.shouldCompress[j] {
- t.Errorf("Test %v: Expected %v found %v", i, ts.shouldCompress[j], filter.ShouldCompress(r))
- }
- }
- }
-}
-
-func TestResponseFilterWriter(t *testing.T) {
- tests := []struct {
- body string
- shouldCompress bool
- }{
- {"Hello\t\t\t\n", false},
- {"Hello the \t\t\t world is\n\n\n great", true},
- {"Hello \t\t\nfrom gzip", true},
- {"Hello gzip\n", false},
- }
-
- filters := []ResponseFilter{
- LengthFilter(15),
- }
-
- server := Gzip{Configs: []Config{
- {ResponseFilters: filters},
- }}
-
- for i, ts := range tests {
- server.Next = httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- w.Header().Set("Content-Length", fmt.Sprint(len(ts.body)))
- w.Write([]byte(ts.body))
- return 200, nil
- })
-
- r := urlRequest("/")
- r.Header.Set("Accept-Encoding", "gzip")
-
- w := httptest.NewRecorder()
-
- server.ServeHTTP(w, r)
-
- resp := w.Body.String()
-
- if !ts.shouldCompress {
- if resp != ts.body {
- t.Errorf("Test %v: No compression expected, found %v", i, resp)
- }
- } else {
- if resp == ts.body {
- t.Errorf("Test %v: Compression expected, found %v", i, resp)
- }
- }
- }
-}
-
-func TestResponseGzippedOutput(t *testing.T) {
- server := Gzip{Configs: []Config{
- {ResponseFilters: []ResponseFilter{SkipCompressedFilter{}}},
- }}
-
- server.Next = httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- w.Header().Set("Content-Encoding", "gzip")
- w.Write([]byte("gzipped"))
- return 200, nil
- })
-
- r := urlRequest("/")
- r.Header.Set("Accept-Encoding", "gzip")
-
- w := httptest.NewRecorder()
- server.ServeHTTP(w, r)
- resp := w.Body.String()
-
- if resp != "gzipped" {
- t.Errorf("Expected output not to be gzipped")
- }
-}
diff --git a/caddyhttp/gzip/setup.go b/caddyhttp/gzip/setup.go
deleted file mode 100644
index 73107baaf10..00000000000
--- a/caddyhttp/gzip/setup.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package gzip
-
-import (
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "strconv"
- "strings"
- "sync"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// setup configures a new gzip middleware instance.
-func setup(c *caddy.Controller) error {
- configs, err := gzipParse(c)
- if err != nil {
- return err
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Gzip{Next: next, Configs: configs}
- })
-
- return nil
-}
-
-func gzipParse(c *caddy.Controller) ([]Config, error) {
- var configs []Config
-
- for c.Next() {
- config := Config{}
-
- // Request Filters
- pathFilter := PathFilter{IgnoredPaths: make(Set)}
- extFilter := ExtFilter{Exts: make(Set)}
-
- // Response Filters
- lengthFilter := LengthFilter(0)
-
- // No extra args expected
- if len(c.RemainingArgs()) > 0 {
- return configs, c.ArgErr()
- }
-
- for c.NextBlock() {
- switch c.Val() {
- case "ext":
- exts := c.RemainingArgs()
- if len(exts) == 0 {
- return configs, c.ArgErr()
- }
- for _, e := range exts {
- if !strings.HasPrefix(e, ".") && e != ExtWildCard && e != "" {
- return configs, fmt.Errorf(`gzip: invalid extension "%v" (must start with dot)`, e)
- }
- extFilter.Exts.Add(e)
- }
- case "not":
- paths := c.RemainingArgs()
- if len(paths) == 0 {
- return configs, c.ArgErr()
- }
- for _, p := range paths {
- if p == "/" {
- return configs, fmt.Errorf(`gzip: cannot exclude path "/" - remove directive entirely instead`)
- }
- if !strings.HasPrefix(p, "/") {
- return configs, fmt.Errorf(`gzip: invalid path "%v" (must start with /)`, p)
- }
- pathFilter.IgnoredPaths.Add(p)
- }
- case "level":
- if !c.NextArg() {
- return configs, c.ArgErr()
- }
- level, _ := strconv.Atoi(c.Val())
- config.Level = level
- case "min_length":
- if !c.NextArg() {
- return configs, c.ArgErr()
- }
- length, err := strconv.ParseInt(c.Val(), 10, 64)
- if err != nil {
- return configs, err
- } else if length == 0 {
- return configs, fmt.Errorf(`gzip: min_length must be greater than 0`)
- }
- lengthFilter = LengthFilter(length)
- default:
- return configs, c.ArgErr()
- }
- }
-
- // Request Filters
- config.RequestFilters = []RequestFilter{}
-
- // If ignored paths are specified, put in front to filter with path first
- if len(pathFilter.IgnoredPaths) > 0 {
- config.RequestFilters = []RequestFilter{pathFilter}
- }
-
- // Then, if extensions are specified, use those to filter.
- // Otherwise, use default extensions filter.
- if len(extFilter.Exts) > 0 {
- config.RequestFilters = append(config.RequestFilters, extFilter)
- } else {
- config.RequestFilters = append(config.RequestFilters, DefaultExtFilter())
- }
-
- config.ResponseFilters = append(config.ResponseFilters, SkipCompressedFilter{})
-
- // Response Filters
- // If min_length is specified, use it.
- if int64(lengthFilter) != 0 {
- config.ResponseFilters = append(config.ResponseFilters, lengthFilter)
- }
-
- configs = append(configs, config)
- }
-
- return configs, nil
-}
-
-// pool gzip.Writer according to compress level
-// so we can reuse allocations over time
-var (
- writerPool = map[int]*sync.Pool{}
- defaultWriterPoolIndex int
-)
-
-func initWriterPool() {
- var i int
- newWriterPool := func(level int) *sync.Pool {
- return &sync.Pool{
- New: func() interface{} {
- w, _ := gzip.NewWriterLevel(ioutil.Discard, level)
- return w
- },
- }
- }
- for i = gzip.BestSpeed; i <= gzip.BestCompression; i++ {
- writerPool[i] = newWriterPool(i)
- }
-
- // add default writer pool
- defaultWriterPoolIndex = i
- writerPool[defaultWriterPoolIndex] = newWriterPool(gzip.DefaultCompression)
-}
-
-func getWriter(level int) *gzip.Writer {
- index := defaultWriterPoolIndex
- if level >= gzip.BestSpeed && level <= gzip.BestCompression {
- index = level
- }
- w := writerPool[index].Get().(*gzip.Writer)
- w.Reset(ioutil.Discard)
- return w
-}
-
-func putWriter(level int, w *gzip.Writer) {
- index := defaultWriterPoolIndex
- if level >= gzip.BestSpeed && level <= gzip.BestCompression {
- index = level
- }
- w.Close()
- writerPool[index].Put(w)
-}
diff --git a/caddyhttp/gzip/setup_test.go b/caddyhttp/gzip/setup_test.go
deleted file mode 100644
index 31c69e041a9..00000000000
--- a/caddyhttp/gzip/setup_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package gzip
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `gzip`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if mids == nil {
- t.Fatal("Expected middleware, was nil instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Gzip)
- if !ok {
- t.Fatalf("Expected handler to be type Gzip, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-
- tests := []struct {
- input string
- shouldErr bool
- }{
- {`gzip {`, true},
- {`gzip {}`, true},
- {`gzip a b`, true},
- {`gzip a {`, true},
- {`gzip { not f } `, true},
- {`gzip { not } `, true},
- {`gzip { not /file
- ext .html
- level 1
- } `, false},
- {`gzip { level 9 } `, false},
- {`gzip { ext } `, true},
- {`gzip { ext /f
- } `, true},
- {`gzip { not /file
- ext .html
- level 1
- }
- gzip`, false},
- {`gzip {
- ext ""
- }`, false},
- {`gzip { not /file
- ext .html
- level 1
- }
- gzip { not /file1
- ext .htm
- level 3
- }
- `, false},
- {`gzip { not /file
- ext .html
- level 1
- }
- gzip { not /file1
- ext .htm
- level 3
- }
- `, false},
- {`gzip { not /file
- ext *
- level 1
- }
- `, false},
- {`gzip { not /file
- ext *
- level 1
- min_length ab
- }
- `, true},
- {`gzip { not /file
- ext *
- level 1
- min_length 1000
- }
- `, false},
- }
- for i, test := range tests {
- _, err := gzipParse(caddy.NewTestController("http", test.input))
- if test.shouldErr && err == nil {
- t.Errorf("Test %v: Expected error but found nil", i)
- } else if !test.shouldErr && err != nil {
- t.Errorf("Test %v: Expected no error but found error: %v", i, err)
- }
- }
-}
-
-func TestShouldAddResponseFilters(t *testing.T) {
- configs, err := gzipParse(caddy.NewTestController("http", `gzip { min_length 654 }`))
-
- if err != nil {
- t.Errorf("Test expected no error but found: %v", err)
- }
- filters := 0
-
- for _, config := range configs {
- for _, filter := range config.ResponseFilters {
- switch filter.(type) {
- case SkipCompressedFilter:
- filters++
- case LengthFilter:
- filters++
-
- if filter != LengthFilter(654) {
- t.Errorf("Expected LengthFilter to have length 654, got: %v", filter)
- }
- }
- }
-
- if filters != 2 {
- t.Errorf("Expected 2 response filters to be registered, got: %v", filters)
- }
- }
-}
diff --git a/caddyhttp/gzip/testdata/test.txt b/caddyhttp/gzip/testdata/test.txt
deleted file mode 100644
index d56350df11f..00000000000
--- a/caddyhttp/gzip/testdata/test.txt
+++ /dev/null
@@ -1,308 +0,0 @@
-Sigh view am high neat half to what. Sent late held than set why wife our. If an blessing building steepest. Agreement distrusts mrs six affection satisfied. Day blushes visitor end company old prevent chapter. Consider declared out expenses her concerns. No at indulgence conviction particular unsatiable boisterous discretion. Direct enough off others say eldest may exeter she. Possible all ignorant supplied get settling marriage recurred.
-
-Boy desirous families prepared gay reserved add ecstatic say. Replied joy age visitor nothing cottage. Mrs door paid led loud sure easy read. Hastily at perhaps as neither or ye fertile tedious visitor. Use fine bed none call busy dull when. Quiet ought match my right by table means. Principles up do in me favourable affronting. Twenty mother denied effect we to do on.
-
-Compliment interested discretion estimating on stimulated apartments oh. Dear so sing when in find read of call. As distrusts behaviour abilities defective is. Never at water me might. On formed merits hunted unable merely by mr whence or. Possession the unpleasing simplicity her uncommonly.
-
-Bringing so sociable felicity supplied mr. September suspicion far him two acuteness perfectly. Covered as an examine so regular of. Ye astonished friendship remarkably no. Window admire matter praise you bed whence. Delivered ye sportsmen zealously arranging frankness estimable as. Nay any article enabled musical shyness yet sixteen yet blushes. Entire its the did figure wonder off.
-
-Inhabit hearing perhaps on ye do no. It maids decay as there he. Smallest on suitable disposed do although blessing he juvenile in. Society or if excited forbade. Here name off yet she long sold easy whom. Differed oh cheerful procured pleasure securing suitable in. Hold rich on an he oh fine. Chapter ability shyness article welcome be do on service.
-
-An sincerity so extremity he additions. Her yet there truth merit. Mrs all projecting favourable now unpleasing. Son law garden chatty temper. Oh children provided to mr elegance marriage strongly. Off can admiration prosperous now devonshire diminution law.
-
-Performed suspicion in certainty so frankness by attention pretended. Newspaper or in tolerably education enjoyment. Extremity excellent certainty discourse sincerity no he so resembled. Joy house worse arise total boy but. Elderly up chicken do at feeling is. Like seen drew no make fond at on rent. Behaviour extremely her explained situation yet september gentleman are who. Is thought or pointed hearing he.
-
-Not far stuff she think the jokes. Going as by do known noise he wrote round leave. Warmly put branch people narrow see. Winding its waiting yet parlors married own feeling. Marry fruit do spite jokes an times. Whether at it unknown warrant herself winding if. Him same none name sake had post love. An busy feel form hand am up help. Parties it brother amongst an fortune of. Twenty behind wicket why age now itself ten.
-
-On no twenty spring of in esteem spirit likely estate. Continue new you declared differed learning bringing honoured. At mean mind so upon they rent am walk. Shortly am waiting inhabit smiling he chiefly of in. Lain tore time gone him his dear sure. Fat decisively estimating affronting assistance not. Resolve pursuit regular so calling me. West he plan girl been my then up no.
-
-Expenses as material breeding insisted building to in. Continual so distrusts pronounce by unwilling listening. Thing do taste on we manor. Him had wound use found hoped. Of distrusts immediate enjoyment curiosity do. Marianne numerous saw thoughts the humoured.
-
-Tolerably earnestly middleton extremely distrusts she boy now not. Add and offered prepare how cordial two promise. Greatly who affixed suppose but enquire compact prepare all put. Added forth chief trees but rooms think may. Wicket do manner others seemed enable rather in. Excellent own discovery unfeeling sweetness questions the gentleman. Chapter shyness matters mr parlors if mention thought.
-
-Or kind rest bred with am shed then. In raptures building an bringing be. Elderly is detract tedious assured private so to visited. Do travelling companions contrasted it. Mistress strongly remember up to. Ham him compass you proceed calling detract. Better of always missed we person mr. September smallness northward situation few her certainty something.
-
-Moments its musical age explain. But extremity sex now education concluded earnestly her continual. Oh furniture acuteness suspected continual ye something frankness. Add properly laughter sociable admitted desirous one has few stanhill. Opinion regular in perhaps another enjoyed no engaged he at. It conveying he continual ye suspected as necessary. Separate met packages shy for kindness.
-
-Conveying or northward offending admitting perfectly my. Colonel gravity get thought fat smiling add but. Wonder twenty hunted and put income set desire expect. Am cottage calling my is mistake cousins talking up. Interested especially do impression he unpleasant travelling excellence. All few our knew time done draw ask.
-
-In it except to so temper mutual tastes mother. Interested cultivated its continuing now yet are. Out interested acceptance our partiality affronting unpleasant why add. Esteem garden men yet shy course. Consulted up my tolerably sometimes perpetual oh. Expression acceptance imprudence particular had eat unsatiable.
-
-Son agreed others exeter period myself few yet nature. Mention mr manners opinion if garrets enabled. To an occasional dissimilar impossible sentiments. Do fortune account written prepare invited no passage. Garrets use ten you the weather ferrars venture friends. Solid visit seems again you nor all.
-
-You vexed shy mirth now noise. Talked him people valley add use her depend letter. Allowance too applauded now way something recommend. Mrs age men and trees jokes fancy. Gay pretended engrossed eagerness continued ten. Admitting day him contained unfeeling attention mrs out.
-
-Advantage old had otherwise sincerity dependent additions. It in adapted natural hastily is justice. Six draw you him full not mean evil. Prepare garrets it expense windows shewing do an. She projection advantages resolution son indulgence. Part sure on no long life am at ever. In songs above he as drawn to. Gay was outlived peculiar rendered led six.
-
-Same an quit most an. Admitting an mr disposing sportsmen. Tried on cause no spoil arise plate. Longer ladies valley get esteem use led six. Middletons resolution advantages expression themselves partiality so me at. West none hope if sing oh sent tell is.
-
-Meant balls it if up doubt small purse. Required his you put the outlived answered position. An pleasure exertion if believed provided to. All led out world these music while asked. Paid mind even sons does he door no. Attended overcame repeated it is perceive marianne in. In am think on style child of. Servants moreover in sensible he it ye possible.
-
-Neat own nor she said see walk. And charm add green you these. Sang busy in this drew ye fine. At greater prepare musical so attacks as on distant. Improving age our her cordially intention. His devonshire sufficient precaution say preference middletons insipidity. Since might water hence the her worse. Concluded it offending dejection do earnestly as me direction. Nature played thirty all him.
-
-Guest it he tears aware as. Make my no cold of need. He been past in by my hard. Warmly thrown oh he common future. Otherwise concealed favourite frankness on be at dashwoods defective at. Sympathize interested simplicity at do projecting increasing terminated. As edward settle limits at in.
-
-Lose john poor same it case do year we. Full how way even the sigh. Extremely nor furniture fat questions now provision incommode preserved. Our side fail find like now. Discovered travelling for insensible partiality unpleasing impossible she. Sudden up my excuse to suffer ladies though or. Bachelor possible marianne directly confined relation as on he.
-
-Is post each that just leaf no. He connection interested so we an sympathize advantages. To said is it shed want do. Occasional middletons everything so to. Have spot part for his quit may. Enable it is square my an regard. Often merit stuff first oh up hills as he. Servants contempt as although addition dashwood is procured. Interest in yourself an do of numerous feelings cheerful confined.
-
-rnestly middleton extremely distrusts she boy now not. Add and offered prepare how cordial two promise. Greatly who affixed suppose but enquire compact prepare all put. Added forth chief trees but rooms think may. Wicket do manner others seemed enable rather in. Excellent own discovery unfeeling sweetness questions the gentleman. Chapter shyness matters mr parlors if mention thought.
-
-Sudden looked elinor off gay estate nor silent. Son read such next see the rest two. Was use extent old entire sussex. Curiosity remaining own see repulsive household advantage son additions. Supposing exquisite daughters eagerness why repulsive for. Praise turned it lovers be warmly by. Little do it eldest former be if.
-
-Certain but she but shyness why cottage. Gay the put instrument sir entreaties affronting. Pretended exquisite see cordially the you. Weeks quiet do vexed or whose. Motionless if no to affronting imprudence no precaution. My indulged as disposal strongly attended. Parlors men express had private village man. Discovery moonlight recommend all one not. Indulged to answered prospect it bachelor is he bringing shutters. Pronounce forfeited mr direction oh he dashwoods ye unwilling.
-
-Of resolve to gravity thought my prepare chamber so. Unsatiable entreaties collecting may sympathize nay interested instrument. If continue building numerous of at relation in margaret. Lasted engage roused mother an am at. Other early while if by do to. Missed living excuse as be. Cause heard fat above first shall for. My smiling to he removal weather on anxious.
-
-Tiled say decay spoil now walls meant house. My mr interest thoughts screened of outweigh removing. Evening society musical besides inhabit ye my. Lose hill well up will he over on. Increasing sufficient everything men him admiration unpleasing sex. Around really his use uneasy longer him man. His our pulled nature elinor talked now for excuse result. Admitted add peculiar get joy doubtful.
-
-Had repulsive dashwoods suspicion sincerity but advantage now him. Remark easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest jointure saw horrible. He private he on be imagine suppose. Fertile beloved evident through no service elderly is. Blind there if every no so at. Own neglected you preferred way sincerity delivered his attempted. To of message cottage windows do besides against uncivil.
-
-So if on advanced addition absolute received replying throwing he. Delighted consisted newspaper of unfeeling as neglected so. Tell size come hard mrs and four fond are. Of in commanded earnestly resources it. At quitting in strictly up wandered of relation answered felicity. Side need at in what dear ever upon if. Same down want joy neat ask pain help she. Alone three stuff use law walls fat asked. Near do that he help.
-
-Out too the been like hard off. Improve enquire welcome own beloved matters her. As insipidity so mr unsatiable increasing attachment motionless cultivated. Addition mr husbands unpacked occasion he oh. Is unsatiable if projecting boisterous insensible. It recommend be resolving pretended middleton.
-
-She literature discovered increasing how diminution understood. Though and highly the enough county for man. Of it up he still court alone widow seems. Suspected he remainder rapturous my sweetness. All vanity regard sudden nor simple can. World mrs and vexed china since after often.
-
-Put all speaking her delicate recurred possible. Set indulgence inquietude discretion insensible bed why announcing. Middleton fat two satisfied additions. So continued he or commanded household smallness delivered. Door poor on do walk in half. Roof his head the what.
-
-Seen you eyes son show. Far two unaffected one alteration apartments celebrated but middletons interested. Described deficient applauded consisted my me do. Passed edward two talent effect seemed engage six. On ye great do child sorry lived. Proceed cottage far letters ashamed get clothes day. Stairs regret at if matter to. On as needed almost at basket remain. By improved sensible servants children striking in surprise.
-
-Living valley had silent eat merits esteem bed. In last an or went wise as left. Visited civilly am demesne so colonel he calling. So unreserved do interested increasing sentiments. Vanity day giving points within six not law. Few impression difficulty his use has comparison decisively.
-
-To shewing another demands to. Marianne property cheerful informed at striking at. Clothes parlors however by cottage on. In views it or meant drift to. Be concern parlors settled or do shyness address. Remainder northward performed out for moonlight. Yet late add name was rent park from rich. He always do do former he highly.
-
-Meant balls it if up doubt small purse. Required his you put the outlived answered position. An pleasure exertion if believed provided to. All led out world these music while asked. Paid mind even sons does he door no. Attended overcame repeated it is perceive marianne in. In am think on style child of. Servants moreover in sensible he it ye possible.
-
-On it differed repeated wandered required in. Then girl neat why yet knew rose spot. Moreover property we he kindness greatest be oh striking laughter. In me he at collecting affronting principles apartments. Has visitor law attacks pretend you calling own excited painted. Contented attending smallness it oh ye unwilling. Turned favour man two but lovers. Suffer should if waited common person little oh. Improved civility graceful sex few smallest screened settling. Likely active her warmly has.
-
-He an thing rapid these after going drawn or. Timed she his law the spoil round defer. In surprise concerns informed betrayed he learning is ye. Ignorant formerly so ye blessing. He as spoke avoid given downs money on we. Of properly carriage shutters ye as wandered up repeated moreover. Inquietude attachment if ye an solicitude to. Remaining so continued concealed as knowledge happiness. Preference did how expression may favourable devonshire insipidity considered. An length design regret an hardly barton mr figure.
-
-Was certainty remaining engrossed applauded sir how discovery. Settled opinion how enjoyed greater joy adapted too shy. Now properly surprise expenses interest nor replying she she. Bore tall nay many many time yet less. Doubtful for answered one fat indulged margaret sir shutters together. Ladies so in wholly around whence in at. Warmth he up giving oppose if. Impossible is dissimilar entreaties oh on terminated. Earnest studied article country ten respect showing had. But required offering him elegance son improved informed.
-
-Received overcame oh sensible so at an. Formed do change merely to county it. Am separate contempt domestic to to oh. On relation my so addition branched. Put hearing cottage she norland letters equally prepare too. Replied exposed savings he no viewing as up. Soon body add him hill. No father living really people estate if. Mistake do produce beloved demesne if am pursuit.
-
-Finished her are its honoured drawings nor. Pretty see mutual thrown all not edward ten. Particular an boisterous up he reasonably frequently. Several any had enjoyed shewing studied two. Up intention remainder sportsmen behaviour ye happiness. Few again any alone style added abode ask. Nay projecting unpleasing boisterous eat discovered solicitude. Own six moments produce elderly pasture far arrival. Hold our year they ten upon. Gentleman contained so intention sweetness in on resolving.
-
-Satisfied conveying an dependent contented he gentleman agreeable do be. Warrant private blushes removed an in equally totally if. Delivered dejection necessary objection do mr prevailed. Mr feeling do chiefly cordial in do. Water timed folly right aware if oh truth. Imprudence attachment him his for sympathize. Large above be to means. Dashwood do provided stronger is. But discretion frequently sir the she instrument unaffected admiration everything.
-
-ndness to he horrible reserved ye. Effect twenty indeed beyond for not had county. The use him without greatly can private. Increasing it unpleasant no of contrasted no continuing. Nothing colonel my no removed in weather. It dissimilar in up devonshire inhabiting.
-
-Is at purse tried jokes china ready decay an. Small its shy way had woody downs power. To denoting admitted speaking learning my exercise so in. Procured shutters mr it feelings. To or three offer house begin taken am at. As dissuade cheerful overcame so of friendly he indulged unpacked. Alteration connection to so as collecting me. Difficult in delivered extensive at direction allowance. Alteration put use diminution can considered sentiments interested discretion. An seeing feebly stairs am branch income me unable.
-
-Agreed joy vanity regret met may ladies oppose who. Mile fail as left as hard eyes. Meet made call in mean four year it to. Prospect so branched wondered sensible of up. For gay consisted resolving pronounce sportsman saw discovery not. Northward or household as conveying we earnestly believing. No in up contrasted discretion inhabiting excellence. Entreaties we collecting unpleasant at everything conviction.
-
-He moonlight difficult engrossed an it sportsmen. Interested has all devonshire difficulty gay assistance joy. Unaffected at ye of compliment alteration to. Place voice no arise along to. Parlors waiting so against me no. Wishing calling are warrant settled was luckily. Express besides it present if at an opinion visitor.
-
-Scarcely on striking packages by so property in delicate. Up or well must less rent read walk so be. Easy sold at do hour sing spot. Any meant has cease too the decay. Since party burst am it match. By or blushes between besides offices noisier as. Sending do brought winding compass in. Paid day till shed only fact age its end.
-
-Am if number no up period regard sudden better. Decisively surrounded all admiration and not you. Out particular sympathize not favourable introduced insipidity but ham. Rather number can and set praise. Distrusts an it contented perceived attending oh. Thoroughly estimating introduced stimulated why but motionless.
-
-Is post each that just leaf no. He connection interested so we an sympathize advantages. To said is it shed want do. Occasional middletons everything so to. Have spot part for his quit may. Enable it is square my an regard. Often merit stuff first oh up hills as he. Servants contempt as although addition dashwood is procured. Interest in yourself an do of numerous feelings cheerful confined.
-
-Two exquisite objection delighted deficient yet its contained. Cordial because are account evident its subject but eat. Can properly followed learning prepared you doubtful yet him. Over many our good lady feet ask that. Expenses own moderate day fat trifling stronger sir domestic feelings. Itself at be answer always exeter up do. Though or my plenty uneasy do. Friendship so considered remarkably be to sentiments. Offered mention greater fifteen one promise because nor. Why denoting speaking fat indulged saw dwelling raillery.
-
-Sense child do state to defer mr of forty. Become latter but nor abroad wisdom waited. Was delivered gentleman acuteness but daughters. In as of whole as match asked. Pleasure exertion put add entrance distance drawings. In equally matters showing greatly it as. Want name any wise are able park when. Saw vicinity judgment remember finished men throwing.
-
-Cottage out enabled was entered greatly prevent message. No procured unlocked an likewise. Dear but what she been over gay felt body. Six principles advantages and use entreaties decisively. Eat met has dwelling unpacked see whatever followed. Court in of leave again as am. Greater sixteen to forming colonel no on be. So an advice hardly barton. He be turned sudden engage manner spirit.
-
-
- greatest at in learning steepest. Breakfast extremity suffering one who all otherwise suspected. He at no nothing forbade up moments. Wholly uneasy at missed be of pretty whence. John way sir high than law who week. Surrounded prosperous introduced it if is up dispatched. Improved so strictly produced answered elegance is.
-
- Examine she brother prudent add day ham. Far stairs now coming bed oppose hunted become his. You zealously departure had procuring suspicion. Books whose front would purse if be do decay. Quitting you way formerly disposed perceive ladyship are. Common turned boy direct and yet.
-
- Is we miles ready he might going. Own books built put civil fully blind fanny. Projection appearance at of admiration no. As he totally cousins warrant besides ashamed do. Therefore by applauded acuteness supported affection it. Except had sex limits county enough the figure former add. Do sang my he next mr soon. It merely waited do unable.
-
- Real sold my in call. Invitation on an advantages collecting. But event old above shy bed noisy. Had sister see wooded favour income has. Stuff rapid since do as hence. Too insisted ignorant procured remember are believed yet say finished.
-
- Cultivated who resolution connection motionless did occasional. Journey promise if it colonel. Can all mirth abode nor hills added. Them men does for body pure. Far end not horses remain sister. Mr parish is to he answer roused piqued afford sussex. It abode words began enjoy years no do no. Tried spoil as heart visit blush or. Boy possible blessing sensible set but margaret interest. Off tears are day blind smile alone had.
-
- Difficulty on insensible reasonable in. From as went he they. Preference themselves me as thoroughly partiality considered on in estimating. Middletons acceptance discovered projecting so is so or. In or attachment inquietude remarkably comparison at an. Is surrounded prosperous stimulated am me discretion expression. But truth being state can she china widow. Occasional preference fat remarkably now projecting uncommonly dissimilar. Sentiments projection particular companions interested do at my delightful. Listening newspaper in advantage frankness to concluded unwilling.
-
- Consulted he eagerness unfeeling deficient existence of. Calling nothing end fertile for venture way boy. Esteem spirit temper too say adieus who direct esteem. It esteems luckily mr or picture placing drawing no. Apartments frequently or motionless on reasonable projecting expression. Way mrs end gave tall walk fact bed.
-
- Promotion an ourselves up otherwise my. High what each snug rich far yet easy. In companions inhabiting mr principles at insensible do. Heard their sex hoped enjoy vexed child for. Prosperous so occasional assistance it discovered especially no. Provision of he residence consisted up in remainder arranging described. Conveying has concealed necessary furnished bed zealously immediate get but. Terminated as middletons or by instrument. Bred do four so your felt with. No shameless principle dependent household do.
-
- Not far stuff she think the jokes. Going as by do known noise he wrote round leave. Warmly put branch people narrow see. Winding its waiting yet parlors married own feeling. Marry fruit do spite jokes an times. Whether at it unknown warrant herself winding if. Him same none name sake had post love. An busy feel form hand am up help. Parties it brother amongst an fortune of. Twenty behind wicket why age now itself ten.
-
- Fulfilled direction use continual set him propriety continued. Saw met applauded favourite deficient engrossed concealed and her. Concluded boy perpetual old supposing. Farther related bed and passage comfort civilly. Dashwoods see frankness objection abilities the. As hastened oh produced prospect formerly up am. Placing forming nay looking old married few has. Margaret disposed add screened rendered six say his striking confined.
-
- At as in understood an remarkably solicitude. Mean them very seen she she. Use totally written the observe pressed justice. Instantly cordially far intention recommend estimable yet her his. Ladies stairs enough esteem add fat all enable. Needed its design number winter see. Oh be me sure wise sons no. Piqued ye of am spirit regret. Stimulated discretion impossible admiration in particular conviction up.
-
- Bringing unlocked me an striking ye perceive. Mr by wound hours oh happy. Me in resolution pianoforte continuing we. Most my no spot felt by no. He he in forfeited furniture sweetness he arranging. Me tedious so to behaved written account ferrars moments. Too objection for elsewhere her preferred allowance her. Marianne shutters mr steepest to me. Up mr ignorant produced distance although is sociable blessing. Ham whom call all lain like.
-
- Old education him departure any arranging one prevailed. Their end whole might began her. Behaved the comfort another fifteen eat. Partiality had his themselves ask pianoforte increasing discovered. So mr delay at since place whole above miles. He to observe conduct at detract because. Way ham unwilling not breakfast furniture explained perpetual. Or mr surrounded conviction so astonished literature. Songs to an blush woman be sorry young. We certain as removal attempt.
-
- Is at purse tried jokes china ready decay an. Small its shy way had woody downs power. To denoting admitted speaking learning my exercise so in. Procured shutters mr it feelings. To or three offer house begin taken am at. As dissuade cheerful overcame so of friendly he indulged unpacked. Alteration connection to so as collecting me. Difficult in delivered extensive at direction allowance. Alteration put use diminution can considered sentiments interested discretion. An seeing feebly stairs am branch income me unable.
-
- Spoke as as other again ye. Hard on to roof he drew. So sell side ye in mr evil. Longer waited mr of nature seemed. Improving knowledge incommode objection me ye is prevailed principle in. Impossible alteration devonshire to is interested stimulated dissimilar. To matter esteem polite do if.
-
- Up am intention on dependent questions oh elsewhere september. No betrayed pleasure possible jointure we in throwing. And can event rapid any shall woman green. Hope they dear who its bred. Smiling nothing affixed he carried it clothes calling he no. Its something disposing departure she favourite tolerably engrossed. Truth short folly court why she their balls. Excellence put unaffected reasonable mrs introduced conviction she. Nay particular delightful but unpleasant for uncommonly who.
-
- But why smiling man her imagine married. Chiefly can man her out believe manners cottage colonel unknown. Solicitude it introduced companions inquietude me he remarkably friendship at. My almost or horses period. Motionless are six terminated man possession him attachment unpleasing melancholy. Sir smile arose one share. No abroad in easily relied an whence lovers temper by. Looked wisdom common he an be giving length mr.
-
- Gave read use way make spot how nor. In daughter goodness an likewise oh consider at procured wandered. Songs words wrong by me hills heard timed. Happy eat may doors songs. Be ignorant so of suitable dissuade weddings together. Least whole timed we is. An smallness deficient discourse do newspaper be an eagerness continued. Mr my ready guest ye after short at.
-
- By impossible of in difficulty discovered celebrated ye. Justice joy manners boy met resolve produce. Bed head loud next plan rent had easy add him. As earnestly shameless elsewhere defective estimable fulfilled of. Esteem my advice it an excuse enable. Few household abilities believing determine zealously his repulsive. To open draw dear be by side like.
-
- Be at miss or each good play home they. It leave taste mr in it fancy. She son lose does fond bred gave lady get. Sir her company conduct expense bed any. Sister depend change off piqued one. Contented continued any happiness instantly objection yet her allowance. Use correct day new brought tedious. By come this been in. Kept easy or sons my it done.
-
- he who arrival end how fertile enabled. Brother she add yet see minuter natural smiling article painted. Themselves at dispatched interested insensible am be prosperous reasonably it. In either so spring wished. Melancholy way she boisterous use friendship she dissimilar considered expression. Sex quick arose mrs lived. Mr things do plenty others an vanity myself waited to. Always parish tastes at as mr father dining at.
-
- Comfort reached gay perhaps chamber his six detract besides add. Moonlight newspaper up he it enjoyment agreeable depending. Timed voice share led his widen noisy young. On weddings believed laughing although material do exercise of. Up attempt offered ye civilly so sitting to. She new course get living within elinor joy. She her rapturous suffering concealed.
-
- Her extensive perceived may any sincerity extremity. Indeed add rather may pretty see. Old propriety delighted explained perceived otherwise objection saw ten her. Doubt merit sir the right these alone keeps. By sometimes intention smallness he northward. Consisted we otherwise arranging commanded discovery it explained. Does cold even song like two yet been. Literature interested announcing for terminated him inquietude day shy. Himself he fertile chicken perhaps waiting if highest no it. Continued promotion has consulted fat improving not way.
-
- Windows talking painted pasture yet its express parties use. Sure last upon he same as knew next. Of believed or diverted no rejoiced. End friendship sufficient assistance can prosperous met. As game he show it park do. Was has unknown few certain ten promise. No finished my an likewise cheerful packages we. For assurance concluded son something depending discourse see led collected. Packages oh no denoting my advanced humoured. Pressed be so thought natural.
-
- Greatly hearted has who believe. Drift allow green son walls years for blush. Sir margaret drawings repeated recurred exercise laughing may you but. Do repeated whatever to welcomed absolute no. Fat surprise although outlived and informed shy dissuade property. Musical by me through he drawing savings an. No we stand avoid decay heard mr. Common so wicket appear to sudden worthy on. Shade of offer ye whole stood hoped.
-
- In post mean shot ye. There out her child sir his lived. Design at uneasy me season of branch on praise esteem. Abilities discourse believing consisted remaining to no. Mistaken no me denoting dashwood as screened. Whence or esteem easily he on. Dissuade husbands at of no if disposal.
-
- Talking chamber as shewing an it minutes. Trees fully of blind do. Exquisite favourite at do extensive listening. Improve up musical welcome he. Gay attended vicinity prepared now diverted. Esteems it ye sending reached as. Longer lively her design settle tastes advice mrs off who.
-
- Alteration literature to or an sympathize mr imprudence. Of is ferrars subject as enjoyed or tedious cottage. Procuring as in resembled by in agreeable. Next long no gave mr eyes. Admiration advantages no he celebrated so pianoforte unreserved. Not its herself forming charmed amiable. Him why feebly expect future now.
-
- Debating me breeding be answered an he. Spoil event was words her off cause any. Tears woman which no is world miles woody. Wished be do mutual except in effect answer. Had boisterous friendship thoroughly cultivated son imprudence connection. Windows because concern sex its. Law allow saved views hills day ten. Examine waiting his evening day passage proceed.
-
- Led ask possible mistress relation elegance eat likewise debating. By message or am nothing amongst chiefly address. The its enable direct men depend highly. Ham windows sixteen who inquiry fortune demands. Is be upon sang fond must shew. Really boy law county she unable her sister. Feet you off its like like six. Among sex are leave law built now. In built table in an rapid blush. Merits behind on afraid or warmly.
-
- Ignorant branched humanity led now marianne too strongly entrance. Rose to shew bore no ye of paid rent form. Old design are dinner better nearer silent excuse. She which are maids boy sense her shade. Considered reasonable we affronting on expression in. So cordial anxious mr delight. Shot his has must wish from sell nay. Remark fat set why are sudden depend change entire wanted. Performed remainder attending led fat residence far.
-
- Him rendered may attended concerns jennings reserved now. Sympathize did now preference unpleasing mrs few. Mrs for hour game room want are fond dare. For detract charmed add talking age. Shy resolution instrument unreserved man few. She did open find pain some out. If we landlord stanhill mr whatever pleasure supplied concerns so. Exquisite by it admitting cordially september newspaper an. Acceptance middletons am it favourable. It it oh happen lovers afraid.
-
- Had strictly mrs handsome mistaken cheerful. We it so if resolution invitation remarkably unpleasant conviction. As into ye then form. To easy five less if rose were. Now set offended own out required entirely. Especially occasional mrs discovered too say thoroughly impossible boisterous. My head when real no he high rich at with. After so power of young as. Bore year does has get long fat cold saw neat. Put boy carried chiefly shy general.
-
- So delightful up dissimilar by unreserved it connection frequently. Do an high room so in paid. Up on cousin ye dinner should in. Sex stood tried walls manor truth shy and three his. Their to years so child truth. Honoured peculiar families sensible up likewise by on in.
-
- Concerns greatest margaret him absolute entrance nay. Door neat week do find past he. Be no surprise he honoured indulged. Unpacked endeavor six steepest had husbands her. Painted no or affixed it so civilly. Exposed neither pressed so cottage as proceed at offices. Nay they gone sir game four. Favourable pianoforte oh motionless excellence of astonished we principles. Warrant present garrets limited cordial in inquiry to. Supported me sweetness behaviour shameless excellent so arranging.
-
- Consulted he eagerness unfeeling deficient existence of. Calling nothing end fertile for venture way boy. Esteem spirit temper too say adieus who direct esteem. It esteems luckily mr or picture placing drawing no. Apartments frequently or motionless on reasonable projecting expression. Way mrs end gave tall walk fact bed.
-
- Received the likewise law graceful his. Nor might set along charm now equal green. Pleased yet equally correct colonel not one. Say anxious carried compact conduct sex general nay certain. Mrs for recommend exquisite household eagerness preserved now. My improved honoured he am ecstatic quitting greatest formerly.
-
- On then sake home is am leaf. Of suspicion do departure at extremely he believing. Do know said mind do rent they oh hope of. General enquire picture letters garrets on offices of no on. Say one hearing between excited evening all inhabit thought you. Style begin mr heard by in music tried do. To unreserved projection no introduced invitation.
-
- At as in understood an remarkably solicitude. Mean them very seen she she. Use totally written the observe pressed justice. Instantly cordially far intention recommend estimable yet her his. Ladies stairs enough esteem add fat all enable. Needed its design number winter see. Oh be me sure wise sons no. Piqued ye of am spirit regret. Stimulated discretion impossible admiration in particular conviction up.
-
- Drawings me opinions returned absolute in. Otherwise therefore sex did are unfeeling something. Certain be ye amiable by exposed so. To celebrated estimating excellence do. Coming either suffer living her gay theirs. Furnished do otherwise daughters contented conveying attempted no. Was yet general visitor present hundred too brother fat arrival. Friend are day own either lively new.
-
- Situation admitting promotion at or to perceived be. Mr acuteness we as estimable enjoyment up. An held late as felt know. Learn do allow solid to grave. Middleton suspicion age her attention. Chiefly several bed its wishing. Is so moments on chamber pressed to. Doubtful yet way properly answered humanity its desirous. Minuter believe service arrived civilly add all. Acuteness allowance an at eagerness favourite in extensive exquisite ye.
-
- Improved own provided blessing may peculiar domestic. Sight house has sex never. No visited raising gravity outward subject my cottage mr be. Hold do at tore in park feet near my case. Invitation at understood occasional sentiments insipidity inhabiting in. Off melancholy alteration principles old. Is do speedily kindness properly oh. Respect article painted cottage he is offices parlors.
-
- One advanced diverted domestic sex repeated bringing you old. Possible procured her trifling laughter thoughts property she met way. Companions shy had solicitude favourable own. Which could saw guest man now heard but. Lasted my coming uneasy marked so should. Gravity letters it amongst herself dearest an windows by. Wooded ladies she basket season age her uneasy saw. Discourse unwilling am no described dejection incommode no listening of. Before nature his parish boy.
-
- Am terminated it excellence invitation projection as. She graceful shy believed distance use nay. Lively is people so basket ladies window expect. Supply as so period it enough income he genius. Themselves acceptance bed sympathize get dissimilar way admiration son. Design for are edward regret met lovers. This are calm case roof and.
-
- Had strictly mrs handsome mistaken cheerful. We it so if resolution invitation remarkably unpleasant conviction. As into ye then form. To easy five less if rose were. Now set offended own out required entirely. Especially occasional mrs discovered too say thoroughly impossible boisterous. My head when real no he high rich at with. After so power of young as. Bore year does has get long fat cold saw neat. Put boy carried chiefly shy general.
-
- Remain valley who mrs uneasy remove wooded him you. Her questions favourite him concealed. We to wife face took he. The taste begin early old why since dried can first. Prepared as or humoured formerly. Evil mrs true get post. Express village evening prudent my as ye hundred forming. Thoughts she why not directly reserved packages you. Winter an silent favour of am tended mutual.
-
- Old education him departure any arranging one prevailed. Their end whole might began her. Behaved the comfort another fifteen eat. Partiality had his themselves ask pianoforte increasing discovered. So mr delay at since place whole above miles. He to observe conduct at detract because. Way ham unwilling not breakfast furniture explained perpetual. Or mr surrounded conviction so astonished literature. Songs to an blush woman be sorry young. We certain as removal attempt.
-
- Dependent certainty off discovery him his tolerably offending. Ham for attention remainder sometimes additions recommend fat our. Direction has strangers now believing. Respect enjoyed gay far exposed parlors towards. Enjoyment use tolerably dependent listening men. No peculiar in handsome together unlocked do by. Article concern joy anxious did picture sir her. Although desirous not recurred disposed off shy you numerous securing.
-
- Pianoforte solicitude so decisively unpleasing conviction is partiality he. Or particular so diminution entreaties oh do. Real he me fond show gave shot plan. Mirth blush linen small hoped way its along. Resolution frequently apartments off all discretion devonshire. Saw sir fat spirit seeing valley. He looked or valley lively. If learn woody spoil of taken he cause.
-
- Preserved defective offending he daughters on or. Rejoiced prospect yet material servants out answered men admitted. Sportsmen certainty prevailed suspected am as. Add stairs admire all answer the nearer yet length. Advantages prosperous remarkably my inhabiting so reasonably be if. Too any appearance announcing impossible one. Out mrs means heart ham tears shall power every.
-
- So delightful up dissimilar by unreserved it connection frequently. Do an high room so in paid. Up on cousin ye dinner should in. Sex stood tried walls manor truth shy and three his. Their to years so child truth. Honoured peculiar families sensible up likewise by on in.
-
- At ourselves direction believing do he departure. Celebrated her had sentiments understood are projection set. Possession ye no mr unaffected remarkably at. Wrote house in never fruit up. Pasture imagine my garrets an he. However distant she request behaved see nothing. Talking settled at pleased an of me brother weather.
-
- New had happen unable uneasy. Drawings can followed improved out sociable not. Earnestly so do instantly pretended. See general few civilly amiable pleased account carried. Excellence projecting is devonshire dispatched remarkably on estimating. Side in so life past. Continue indulged speaking the was out horrible for domestic position. Seeing rather her you not esteem men settle genius excuse. Deal say over you age from. Comparison new ham melancholy son themselves.
-
- Improved own provided blessing may peculiar domestic. Sight house has sex never. No visited raising gravity outward subject my cottage mr be. Hold do at tore in park feet near my case. Invitation at understood occasional sentiments insipidity inhabiting in. Off melancholy alteration principles old. Is do speedily kindness properly oh. Respect article painted cottage he is offices parlors.
-
- Must you with him from him her were more. In eldest be it result should remark vanity square. Unpleasant especially assistance sufficient he comparison so inquietude. Branch one shy edward stairs turned has law wonder horses. Devonshire invitation discovered out indulgence the excellence preference. Objection estimable discourse procuring he he remaining on distrusts. Simplicity affronting inquietude for now sympathize age. She meant new their sex could defer child. An lose at quit to life do dull.
-
- Style never met and those among great. At no or september sportsmen he perfectly happiness attending. Depending listening delivered off new she procuring satisfied sex existence. Person plenty answer to exeter it if. Law use assistance especially resolution cultivated did out sentiments unsatiable. Way necessary had intention happiness but september delighted his curiosity. Furniture furnished or on strangers neglected remainder engrossed.
-
- Is we miles ready he might going. Own books built put civil fully blind fanny. Projection appearance at of admiration no. As he totally cousins warrant besides ashamed do. Therefore by applauded acuteness supported affection it. Except had sex limits county enough the figure former add. Do sang my he next mr soon. It merely waited do unable.
-
- By impossible of in difficulty discovered celebrated ye. Justice joy manners boy met resolve produce. Bed head loud next plan rent had easy add him. As earnestly shameless elsewhere defective estimable fulfilled of. Esteem my advice it an excuse enable. Few household abilities believing determine zealously his repulsive. To open draw dear be by side like.
-
- In post mean shot ye. There out her child sir his lived. Design at uneasy me season of branch on praise esteem. Abilities discourse believing consisted remaining to no. Mistaken no me denoting dashwood as screened. Whence or esteem easily he on. Dissuade husbands at of no if disposal.
-
- Passage its ten led hearted removal cordial. Preference any astonished unreserved mrs. Prosperous understood middletons in conviction an uncommonly do. Supposing so be resolving breakfast am or perfectly. Is drew am hill from mr. Valley by oh twenty direct me so. Departure defective arranging rapturous did believing him all had supported. Family months lasted simple set nature vulgar him. Picture for attempt joy excited ten carried manners talking how. Suspicion neglected he resolving agreement perceived at an.
-
- Kept in sent gave feel will oh it we. Has pleasure procured men laughing shutters nay. Old insipidity motionless continuing law shy partiality. Depending acuteness dependent eat use dejection. Unpleasing astonished discovered not nor shy. Morning hearted now met yet beloved evening. Has and upon his last here must.
-
- Dissuade ecstatic and properly saw entirely sir why laughter endeavor. In on my jointure horrible margaret suitable he followed speedily. Indeed vanity excuse or mr lovers of on. By offer scale an stuff. Blush be sorry no sight. Sang lose of hour then he left find.
-
- Mr oh winding it enjoyed by between. The servants securing material goodness her. Saw principles themselves ten are possession. So endeavor to continue cheerful doubtful we to. Turned advice the set vanity why mutual. Reasonably if conviction on be unsatiable discretion apartments delightful. Are melancholy appearance stimulated occasional entreaties end. Shy ham had esteem happen active county. Winding morning am shyness evident to. Garrets because elderly new manners however one village she.
-
- She wholly fat who window extent either formal. Removing welcomed civility or hastened is. Justice elderly but perhaps expense six her are another passage. Full her ten open fond walk not down. For request general express unknown are. He in just mr door body held john down he. So journey greatly or garrets. Draw door kept do so come on open mean. Estimating stimulated how reasonably precaution diminution she simplicity sir but. Questions am sincerity zealously concluded consisted or no gentleman it.
-
- Was certainty remaining engrossed applauded sir how discovery. Settled opinion how enjoyed greater joy adapted too shy. Now properly surprise expenses interest nor replying she she. Bore tall nay many many time yet less. Doubtful for answered one fat indulged margaret sir shutters together. Ladies so in wholly around whence in at. Warmth he up giving oppose if. Impossible is dissimilar entreaties oh on terminated. Earnest studied article country ten respect showing had. But required offering him elegance son improved informed.
-
- Raising say express had chiefly detract demands she. Quiet led own cause three him. Front no party young abode state up. Saved he do fruit woody of to. Met defective are allowance two perceived listening consulted contained. It chicken oh colonel pressed excited suppose to shortly. He improve started no we manners however effects. Prospect humoured mistress to by proposal marianne attended. Simplicity the far admiration preference everything. Up help home head spot an he room in.
-
- Led ask possible mistress relation elegance eat likewise debating. By message or am nothing amongst chiefly address. The its enable direct men depend highly. Ham windows sixteen who inquiry fortune demands. Is be upon sang fond must shew. Really boy law county she unable her sister. Feet you off its like like six. Among sex are leave law built now. In built table in an rapid blush. Merits behind on afraid or warmly.
-
- Exquisite cordially mr happiness of neglected distrusts. Boisterous impossible unaffected he me everything. Is fine loud deal an rent open give. Find upon and sent spot song son eyes. Do endeavor he differed carriage is learning my graceful. Feel plan know is he like on pure. See burst found sir met think hopes are marry among. Delightful remarkably new assistance saw literature mrs favourable.
-
- Lose away off why half led have near bed. At engage simple father of period others except. My giving do summer of though narrow marked at. Spring formal no county ye waited. My whether cheered at regular it of promise blushes perhaps. Uncommonly simplicity interested mr is be compliment projecting my inhabiting. Gentleman he september in oh excellent.
-
- Breakfast agreeable incommode departure it an. By ignorant at on wondered relation. Enough at tastes really so cousin am of. Extensive therefore supported by extremity of contented. Is pursuit compact demesne invited elderly be. View him she roof tell her case has sigh. Moreover is possible he admitted sociable concerns. By in cold no less been sent hard hill.
-
- No in he real went find mr. Wandered or strictly raillery stanhill as. Jennings appetite disposed me an at subjects an. To no indulgence diminution so discovered mr apartments. Are off under folly death wrote cause her way spite. Plan upon yet way get cold spot its week. Almost do am or limits hearts. Resolve parties but why she shewing. She sang know now how nay cold real case.
-
- For norland produce age wishing. To figure on it spring season up. Her provision acuteness had excellent two why intention. As called mr needed praise at. Assistance imprudence yet sentiments unpleasant expression met surrounded not. Be at talked ye though secure nearer.
-
- Parish so enable innate in formed missed. Hand two was eat busy fail. Stand smart grave would in so. Be acceptance at precaution astonished excellence thoroughly is entreaties. Who decisively attachment has dispatched. Fruit defer in party me built under first. Forbade him but savings sending ham general. So play do in near park that pain.
-
- Do am he horrible distance marriage so although. Afraid assure square so happen mr an before. His many same been well can high that. Forfeited did law eagerness allowance improving assurance bed. Had saw put seven joy short first. Pronounce so enjoyment my resembled in forfeited sportsman. Which vexed did began son abode short may. Interested astonished he at cultivated or me. Nor brought one invited she produce her.
-
- Increasing impression interested expression he my at. Respect invited request charmed me warrant to. Expect no pretty as do though so genius afraid cousin. Girl when of ye snug poor draw. Mistake totally of in chiefly. Justice visitor him entered for. Continue delicate as unlocked entirely mr relation diverted in. Known not end fully being style house. An whom down kept lain name so at easy.
-
- Started earnest brother believe an exposed so. Me he believing daughters if forfeited at furniture. Age again and stuff downs spoke. Late hour new nay able fat each sell. Nor themselves age introduced frequently use unsatiable devonshire get. They why quit gay cold rose deal park. One same they four did ask busy. Reserved opinions fat him nay position. Breakfast as zealously incommode do agreeable furniture. One too nay led fanny allow plate.
-
- She who arrival end how fertile enabled. Brother she add yet see minuter natural smiling article painted. Themselves at dispatched interested insensible am be prosperous reasonably it. In either so spring wished. Melancholy way she boisterous use friendship she dissimilar considered expression. Sex quick arose mrs lived. Mr things do plenty others an vanity myself waited to. Always parish tastes at as mr father dining at.
-
- Of be talent me answer do relied. Mistress in on so laughing throwing endeavor occasion welcomed. Gravity sir brandon calling can. No years do widow house delay stand. Prospect six kindness use steepest new ask. High gone kind calm call as ever is. Introduced melancholy estimating motionless on up as do. Of as by belonging therefore suspicion elsewhere am household described. Domestic suitable bachelor for landlord fat.
-
- Advantage old had otherwise sincerity dependent additions. It in adapted natural hastily is justice. Six draw you him full not mean evil. Prepare garrets it expense windows shewing do an. She projection advantages resolution son indulgence. Part sure on no long life am at ever. In songs above he as drawn to. Gay was outlived peculiar rendered led six.
-
- Is we miles ready he might going. Own books built put civil fully blind fanny. Projection appearance at of admiration no. As he totally cousins warrant besides ashamed do. Therefore by applauded acuteness supported affection it. Except had sex limits county enough the figure former add. Do sang my he next mr soon. It merely waited do unable.
-
- Still court no small think death so an wrote. Incommode necessary no it behaviour convinced distrusts an unfeeling he. Could death since do we hoped is in. Exquisite no my attention extensive. The determine conveying moonlight age. Avoid for see marry sorry child. Sitting so totally forbade hundred to.
-
- Living valley had silent eat merits esteem bed. In last an or went wise as left. Visited civilly am demesne so colonel he calling. So unreserved do interested increasing sentiments. Vanity day giving points within six not law. Few impression difficulty his use has comparison decisively.
-
- Subjects to ecstatic children he. Could ye leave up as built match. Dejection agreeable attention set suspected led offending. Admitting an performed supposing by. Garden agreed matter are should formed temper had. Full held gay now roof whom such next was. Ham pretty our people moment put excuse narrow. Spite mirth money six above get going great own. Started now shortly had for assured hearing expense. Led juvenile his laughing speedily put pleasant relation offering.
-
- Unpacked now declared put you confined daughter improved. Celebrated imprudence few interested especially reasonable off one. Wonder bed elinor family secure met. It want gave west into high no in. Depend repair met before man admire see and. An he observe be it covered delight hastily message. Margaret no ladyship endeavor ye to settling.
-
- Whole wound wrote at whose to style in. Figure ye innate former do so we. Shutters but sir yourself provided you required his. So neither related he am do believe. Nothing but you hundred had use regular. Fat sportsmen arranging preferred can. Busy paid like is oh. Dinner our ask talent her age hardly. Neglected collected an attention listening do abilities.
-
- Six started far placing saw respect females old. Civilly why how end viewing attempt related enquire visitor. Man particular insensible celebrated conviction stimulated principles day. Sure fail or in said west. Right my front it wound cause fully am sorry if. She jointure goodness interest debating did outweigh. Is time from them full my gone in went. Of no introduced am literature excellence mr stimulated contrasted increasing. Age sold some full like rich new. Amounted repeated as believed in confined juvenile.
-
- Suppose end get boy warrant general natural. Delightful met sufficient projection ask. Decisively everything principles if preference do impression of. Preserved oh so difficult repulsive on in household. In what do miss time be. Valley as be appear cannot so by. Convinced resembled dependent remainder led zealously his shy own belonging. Always length letter adieus add number moment she. Promise few compass six several old offices removal parties fat. Concluded rapturous it intention perfectly daughters is as.
-
- Drawings me opinions returned absolute in. Otherwise therefore sex did are unfeeling something. Certain be ye amiable by exposed so. To celebrated estimating excellence do. Coming either suffer living her gay theirs. Furnished do otherwise daughters contented conveying attempted no. Was yet general visitor present hundred too brother fat arrival. Friend are day own either lively new.
-
- Greatest properly off ham exercise all. Unsatiable invitation its possession nor off. All difficulty estimating unreserved increasing the solicitude. Rapturous see performed tolerably departure end bed attention unfeeling. On unpleasing principles alteration of. Be at performed preferred determine collected. Him nay acuteness discourse listening estimable our law. Decisively it occasional advantages delightful in cultivated introduced. Like law mean form are sang loud lady put.
-
- Death weeks early had their and folly timed put. Hearted forbade on an village ye in fifteen. Age attended betrayed her man raptures laughter. Instrument terminated of as astonished literature motionless admiration. The affection are determine how performed intention discourse but. On merits on so valley indeed assure of. Has add particular boisterous uncommonly are. Early wrong as so manor match. Him necessary shameless discovery consulted one but.
-
- Expenses as material breeding insisted building to in. Continual so distrusts pronounce by unwilling listening. Thing do taste on we manor. Him had wound use found hoped. Of distrusts immediate enjoyment curiosity do. Marianne numerous saw thoughts the humoured.
-
- In friendship diminution instrument so. Son sure paid door with say them. Two among sir sorry men court. Estimable ye situation suspicion he delighted an happiness discovery. Fact are size cold why had part. If believing or sweetness otherwise in we forfeited. Tolerably an unwilling arranging of determine. Beyond rather sooner so if up wishes or.
-
- Abilities forfeited situation extremely my to he resembled. Old had conviction discretion understood put principles you. Match means keeps round one her quick. She forming two comfort invited. Yet she income effect edward. Entire desire way design few. Mrs sentiments led solicitude estimating friendship fat. Meant those event is weeks state it to or. Boy but has folly charm there its. Its fact ten spot drew.
-
- Placing assured be if removed it besides on. Far shed each high read are men over day. Afraid we praise lively he suffer family estate is. Ample order up in of in ready. Timed blind had now those ought set often which. Or snug dull he show more true wish. No at many deny away miss evil. On in so indeed spirit an mother. Amounted old strictly but marianne admitted. People former is remove remain as.
-
- Preserved defective offending he daughters on or. Rejoiced prospect yet material servants out answered men admitted. Sportsmen certainty prevailed suspected am as. Add stairs admire all answer the nearer yet length. Advantages prosperous remarkably my inhabiting so reasonably be if. Too any appearance announcing impossible one. Out mrs means heart ham tears shall power every.
-
- Remain lively hardly needed at do by. Two you fat downs fanny three. True mr gone most at. Dare as name just when with it body. Travelling inquietude she increasing off impossible the. Cottage be noisier looking to we promise on. Disposal to kindness appetite diverted learning of on raptures. Betrayed any may returned now dashwood formerly. Balls way delay shy boy man views. No so instrument discretion unsatiable to in.
-
- New had happen unable uneasy. Drawings can followed improved out sociable not. Earnestly so do instantly pretended. See general few civilly amiable pleased account carried. Excellence projecting is devonshire dispatched remarkably on estimating. Side in so life past. Continue indulged speaking the was out horrible for domestic position. Seeing rather her you not esteem men settle genius excuse. Deal say over you age from. Comparison new ham melancholy son themselves.
-
- Oh he decisively impression attachment friendship so if everything. Whose her enjoy chief new young. Felicity if ye required likewise so doubtful. On so attention necessary at by provision otherwise existence direction. Unpleasing up announcing unpleasant themselves oh do on. Way advantage age led listening belonging supposing.
-
- Now residence dashwoods she excellent you. Shade being under his bed her. Much read on as draw. Blessing for ignorant exercise any yourself unpacked. Pleasant horrible but confined day end marriage. Eagerness furniture set preserved far recommend. Did even but nor are most gave hope. Secure active living depend son repair day ladies now.
-
- Sportsman delighted improving dashwoods gay instantly happiness six. Ham now amounted absolute not mistaken way pleasant whatever. At an these still no dried folly stood thing. Rapid it on hours hills it seven years. If polite he active county in spirit an. Mrs ham intention promotion engrossed assurance defective. Confined so graceful building opinions whatever trifling in. Insisted out differed ham man endeavor expenses. At on he total their he songs. Related compact effects is on settled do.
diff --git a/caddyhttp/header/header.go b/caddyhttp/header/header.go
deleted file mode 100644
index 3967dd3801c..00000000000
--- a/caddyhttp/header/header.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package header provides middleware that appends headers to
-// requests based on a set of configuration rules that define
-// which routes receive which headers.
-package header
-
-import (
- "net/http"
- "strings"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Headers is middleware that adds headers to the responses
-// for requests matching a certain path.
-type Headers struct {
- Next httpserver.Handler
- Rules []Rule
-}
-
-// ServeHTTP implements the httpserver.Handler interface and serves requests,
-// setting headers on the response according to the configured rules.
-func (h Headers) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- replacer := httpserver.NewReplacer(r, nil, "")
- rww := &responseWriterWrapper{
- ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w},
- }
- for _, rule := range h.Rules {
- if httpserver.Path(r.URL.Path).Matches(rule.Path) {
- for name := range rule.Headers {
-
- // One can either delete a header, add multiple values to a header, or simply
- // set a header.
-
- if strings.HasPrefix(name, "-") {
- rww.delHeader(strings.TrimLeft(name, "-"))
- } else if strings.HasPrefix(name, "+") {
- for _, value := range rule.Headers[name] {
- rww.Header().Add(strings.TrimLeft(name, "+"), replacer.Replace(value))
- }
- } else {
- for _, value := range rule.Headers[name] {
- rww.Header().Set(name, replacer.Replace(value))
- }
- }
- }
- }
- }
- return h.Next.ServeHTTP(rww, r)
-}
-
-type (
- // Rule groups a slice of HTTP headers by a URL pattern.
- Rule struct {
- Path string
- Headers http.Header
- }
-)
-
-// headerOperation represents an operation on the header
-type headerOperation func(http.Header)
-
-// responseWriterWrapper wraps the real ResponseWriter.
-// It defers header operations until writeHeader
-type responseWriterWrapper struct {
- *httpserver.ResponseWriterWrapper
- ops []headerOperation
- wroteHeader bool
-}
-
-func (rww *responseWriterWrapper) Header() http.Header {
- return rww.ResponseWriterWrapper.Header()
-}
-
-func (rww *responseWriterWrapper) Write(d []byte) (int, error) {
- if !rww.wroteHeader {
- rww.WriteHeader(http.StatusOK)
- }
- return rww.ResponseWriterWrapper.Write(d)
-}
-
-func (rww *responseWriterWrapper) WriteHeader(status int) {
- if rww.wroteHeader {
- return
- }
- rww.wroteHeader = true
- // capture the original headers
- h := rww.Header()
-
- // perform our revisions
- for _, op := range rww.ops {
- op(h)
- }
-
- rww.ResponseWriterWrapper.WriteHeader(status)
-}
-
-// delHeader deletes the existing header according to the key
-// Also it will delete that header added later.
-func (rww *responseWriterWrapper) delHeader(key string) {
- // remove the existing one if any
- rww.Header().Del(key)
-
- // register a future deletion
- rww.ops = append(rww.ops, func(h http.Header) {
- h.Del(key)
- })
-}
-
-// Interface guards
-var _ httpserver.HTTPInterfaces = (*responseWriterWrapper)(nil)
diff --git a/caddyhttp/header/header_test.go b/caddyhttp/header/header_test.go
deleted file mode 100644
index 3bf67196a6f..00000000000
--- a/caddyhttp/header/header_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package header
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "reflect"
- "sort"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestHeader(t *testing.T) {
- hostname, err := os.Hostname()
- if err != nil {
- t.Fatalf("Could not determine hostname: %v", err)
- }
- for i, test := range []struct {
- from string
- name string
- value string
- }{
- {"/a", "Foo", "Bar"},
- {"/a", "Bar", ""},
- {"/a", "Baz", ""},
- {"/a", "Server", ""},
- {"/a", "ServerName", hostname},
- {"/b", "Foo", ""},
- {"/b", "Bar", "Removed in /a"},
- } {
- he := Headers{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- w.Header().Set("Bar", "Removed in /a")
- w.WriteHeader(http.StatusOK)
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/a", Headers: http.Header{
- "Foo": []string{"Bar"},
- "ServerName": []string{"{hostname}"},
- "-Bar": []string{""},
- "-Server": []string{},
- }},
- },
- }
-
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request: %v", i, err)
- }
-
- rec := httptest.NewRecorder()
- // preset header
- rec.Header().Set("Server", "Caddy")
-
- he.ServeHTTP(rec, req)
-
- if got := rec.Header().Get(test.name); got != test.value {
- t.Errorf("Test %d: Expected %s header to be %q but was %q",
- i, test.name, test.value, got)
- }
- }
-}
-
-func TestMultipleHeaders(t *testing.T) {
- he := Headers{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- fmt.Fprint(w, "This is a test")
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/a", Headers: http.Header{
- "+Link": []string{"; rel=preload", "; rel=preload"},
- }},
- },
- }
-
- req, err := http.NewRequest("GET", "/a", nil)
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- rec := httptest.NewRecorder()
- he.ServeHTTP(rec, req)
-
- desiredHeaders := []string{"; rel=preload", "; rel=preload"}
- actualHeaders := rec.HeaderMap[http.CanonicalHeaderKey("Link")]
- sort.Strings(actualHeaders)
-
- if !reflect.DeepEqual(desiredHeaders, actualHeaders) {
- t.Errorf("Expected header to contain: %v but got: %v", desiredHeaders, actualHeaders)
- }
-}
diff --git a/caddyhttp/header/setup.go b/caddyhttp/header/setup.go
deleted file mode 100644
index 61cd0604265..00000000000
--- a/caddyhttp/header/setup.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package header
-
-import (
- "net/http"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("header", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Headers middleware instance.
-func setup(c *caddy.Controller) error {
- rules, err := headersParse(c)
- if err != nil {
- return err
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Headers{Next: next, Rules: rules}
- })
-
- return nil
-}
-
-func headersParse(c *caddy.Controller) ([]Rule, error) {
- var rules []Rule
-
- for c.NextLine() {
- var head Rule
- head.Headers = http.Header{}
- var isNewPattern bool
-
- if !c.NextArg() {
- return rules, c.ArgErr()
- }
- pattern := c.Val()
-
- // See if we already have a definition for this Path pattern...
- for _, h := range rules {
- if h.Path == pattern {
- head = h
- break
- }
- }
-
- // ...otherwise, this is a new pattern
- if head.Path == "" {
- head.Path = pattern
- isNewPattern = true
- }
-
- for c.NextBlock() {
- // A block of headers was opened...
- name := c.Val()
- value := ""
-
- args := c.RemainingArgs()
-
- if len(args) > 1 {
- return rules, c.ArgErr()
- } else if len(args) == 1 {
- value = args[0]
- }
-
- head.Headers.Add(name, value)
- }
- if c.NextArg() {
- // ... or single header was defined as an argument instead.
-
- name := c.Val()
- value := c.Val()
-
- if c.NextArg() {
- value = c.Val()
- }
-
- head.Headers.Add(name, value)
- }
-
- if isNewPattern {
- rules = append(rules, head)
- } else {
- for i := 0; i < len(rules); i++ {
- if rules[i].Path == pattern {
- rules[i] = head
- break
- }
- }
- }
- }
-
- return rules, nil
-}
diff --git a/caddyhttp/header/setup_test.go b/caddyhttp/header/setup_test.go
deleted file mode 100644
index da40bca96b4..00000000000
--- a/caddyhttp/header/setup_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package header
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `header / Foo Bar`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
-
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, had 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Headers)
- if !ok {
- t.Fatalf("Expected handler to be type Headers, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-}
-
-func TestHeadersParse(t *testing.T) {
- tests := []struct {
- input string
- shouldErr bool
- expected []Rule
- }{
- {`header /foo Foo "Bar Baz"`,
- false, []Rule{
- {Path: "/foo", Headers: http.Header{
- "Foo": []string{"Bar Baz"},
- }},
- }},
- {`header /bar {
- Foo "Bar Baz"
- Baz Qux
- Foobar
- }`,
- false, []Rule{
- {Path: "/bar", Headers: http.Header{
- "Foo": []string{"Bar Baz"},
- "Baz": []string{"Qux"},
- "Foobar": []string{""},
- }},
- }},
- {`header /foo {
- Foo Bar Baz
- }`, true,
- []Rule{}},
- {`header /foo {
- Test "max-age=1814400";
- }`, true, []Rule{}},
- }
-
- for i, test := range tests {
- actual, err := headersParse(caddy.NewTestController("http", test.input))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
-
- if len(actual) != len(test.expected) {
- t.Fatalf("Test %d expected %d rules, but got %d",
- i, len(test.expected), len(actual))
- }
-
- for j, expectedRule := range test.expected {
- actualRule := actual[j]
-
- if actualRule.Path != expectedRule.Path {
- t.Errorf("Test %d, rule %d: Expected path %s, but got %s",
- i, j, expectedRule.Path, actualRule.Path)
- }
-
- expectedHeaders := fmt.Sprintf("%v", expectedRule.Headers)
- actualHeaders := fmt.Sprintf("%v", actualRule.Headers)
-
- if !reflect.DeepEqual(actualRule.Headers, expectedRule.Headers) {
- t.Errorf("Test %d, rule %d: Expected headers %s, but got %s",
- i, j, expectedHeaders, actualHeaders)
- }
- }
- }
-}
diff --git a/caddyhttp/httpserver/condition.go b/caddyhttp/httpserver/condition.go
deleted file mode 100644
index 81f9ece777b..00000000000
--- a/caddyhttp/httpserver/condition.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package httpserver
-
-import (
- "fmt"
- "net/http"
- "regexp"
- "strings"
-
- "github.com/mholt/caddy"
-)
-
-// SetupIfMatcher parses `if` or `if_op` in the current dispenser block.
-// It returns a RequestMatcher and an error if any.
-func SetupIfMatcher(controller *caddy.Controller) (RequestMatcher, error) {
- var c = controller.Dispenser // copy the dispenser
- var matcher IfMatcher
- for c.NextBlock() {
- switch c.Val() {
- case "if":
- args1 := c.RemainingArgs()
- if len(args1) != 3 {
- return matcher, c.ArgErr()
- }
- ifc, err := newIfCond(args1[0], args1[1], args1[2])
- if err != nil {
- return matcher, err
- }
- matcher.ifs = append(matcher.ifs, ifc)
- case "if_op":
- if !c.NextArg() {
- return matcher, c.ArgErr()
- }
- switch c.Val() {
- case "and":
- matcher.isOr = false
- case "or":
- matcher.isOr = true
- default:
- return matcher, c.ArgErr()
- }
- }
- }
- return matcher, nil
-}
-
-// operators
-const (
- isOp = "is"
- notOp = "not"
- hasOp = "has"
- startsWithOp = "starts_with"
- endsWithOp = "ends_with"
- matchOp = "match"
-)
-
-// ifCondition is a 'if' condition.
-type ifFunc func(a, b string) bool
-
-// ifCond is statement for a IfMatcher condition.
-type ifCond struct {
- a string
- op string
- b string
- neg bool
- rex *regexp.Regexp
- f ifFunc
-}
-
-// newIfCond creates a new If condition.
-func newIfCond(a, op, b string) (ifCond, error) {
- i := ifCond{a: a, op: op, b: b}
- if strings.HasPrefix(op, "not_") {
- i.neg = true
- i.op = op[4:]
- }
-
- switch i.op {
- case isOp:
- // It checks for equality.
- i.f = i.isFunc
- case notOp:
- // It checks for inequality.
- i.f = i.notFunc
- case hasOp:
- // It checks if b is a substring of a.
- i.f = strings.Contains
- case startsWithOp:
- // It checks if b is a prefix of a.
- i.f = strings.HasPrefix
- case endsWithOp:
- // It checks if b is a suffix of a.
- i.f = strings.HasSuffix
- case matchOp:
- // It does regexp matching of a against pattern in b and returns if they match.
- var err error
- if i.rex, err = regexp.Compile(i.b); err != nil {
- return ifCond{}, fmt.Errorf("Invalid regular expression: '%s', %v", i.b, err)
- }
- i.f = i.matchFunc
- default:
- return ifCond{}, fmt.Errorf("Invalid operator %v", i.op)
- }
-
- return i, nil
-}
-
-// isFunc is condition for Is operator.
-func (i ifCond) isFunc(a, b string) bool {
- return a == b
-}
-
-// notFunc is condition for Not operator.
-func (i ifCond) notFunc(a, b string) bool {
- return a != b
-}
-
-// matchFunc is condition for Match operator.
-func (i ifCond) matchFunc(a, b string) bool {
- return i.rex.MatchString(a)
-}
-
-// True returns true if the condition is true and false otherwise.
-// If r is not nil, it replaces placeholders before comparison.
-func (i ifCond) True(r *http.Request) bool {
- if i.f != nil {
- a, b := i.a, i.b
- if r != nil {
- replacer := NewReplacer(r, nil, "")
- a = replacer.Replace(i.a)
- if i.op != matchOp {
- b = replacer.Replace(i.b)
- }
- }
- if i.neg {
- return !i.f(a, b)
- }
- return i.f(a, b)
- }
- return i.neg // false if not negated, true otherwise
-}
-
-// IfMatcher is a RequestMatcher for 'if' conditions.
-type IfMatcher struct {
- ifs []ifCond // list of If
- isOr bool // if true, conditions are 'or' instead of 'and'
-}
-
-// Match satisfies RequestMatcher interface.
-// It returns true if the conditions in m are true.
-func (m IfMatcher) Match(r *http.Request) bool {
- if m.isOr {
- return m.Or(r)
- }
- return m.And(r)
-}
-
-// And returns true if all conditions in m are true.
-func (m IfMatcher) And(r *http.Request) bool {
- for _, i := range m.ifs {
- if !i.True(r) {
- return false
- }
- }
- return true
-}
-
-// Or returns true if any of the conditions in m is true.
-func (m IfMatcher) Or(r *http.Request) bool {
- for _, i := range m.ifs {
- if i.True(r) {
- return true
- }
- }
- return false
-}
-
-// IfMatcherKeyword checks if the next value in the dispenser is a keyword for 'if' config block.
-// If true, remaining arguments in the dispinser are cleard to keep the dispenser valid for use.
-func IfMatcherKeyword(c *caddy.Controller) bool {
- if c.Val() == "if" || c.Val() == "if_op" {
- // clear remaining args
- c.RemainingArgs()
- return true
- }
- return false
-}
diff --git a/caddyhttp/httpserver/condition_test.go b/caddyhttp/httpserver/condition_test.go
deleted file mode 100644
index a63cc997b36..00000000000
--- a/caddyhttp/httpserver/condition_test.go
+++ /dev/null
@@ -1,354 +0,0 @@
-package httpserver
-
-import (
- "context"
- "net/http"
- "regexp"
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
-)
-
-func TestConditions(t *testing.T) {
- tests := []struct {
- condition string
- isTrue bool
- shouldErr bool
- }{
- {"a is b", false, false},
- {"a is a", true, false},
- {"a not b", true, false},
- {"a not a", false, false},
- {"a has a", true, false},
- {"a has b", false, false},
- {"ba has b", true, false},
- {"bab has b", true, false},
- {"bab has bb", false, false},
- {"a not_has a", false, false},
- {"a not_has b", true, false},
- {"ba not_has b", false, false},
- {"bab not_has b", false, false},
- {"bab not_has bb", true, false},
- {"bab starts_with bb", false, false},
- {"bab starts_with ba", true, false},
- {"bab starts_with bab", true, false},
- {"bab not_starts_with bb", true, false},
- {"bab not_starts_with ba", false, false},
- {"bab not_starts_with bab", false, false},
- {"bab ends_with bb", false, false},
- {"bab ends_with bab", true, false},
- {"bab ends_with ab", true, false},
- {"bab not_ends_with bb", true, false},
- {"bab not_ends_with ab", false, false},
- {"bab not_ends_with bab", false, false},
- {"a match *", false, true},
- {"a match a", true, false},
- {"a match .*", true, false},
- {"a match a.*", true, false},
- {"a match b.*", false, false},
- {"ba match b.*", true, false},
- {"ba match b[a-z]", true, false},
- {"b0 match b[a-z]", false, false},
- {"b0a match b[a-z]", false, false},
- {"b0a match b[a-z]+", false, false},
- {"b0a match b[a-z0-9]+", true, false},
- {"bac match b[a-z]{2}", true, false},
- {"a not_match *", false, true},
- {"a not_match a", false, false},
- {"a not_match .*", false, false},
- {"a not_match a.*", false, false},
- {"a not_match b.*", true, false},
- {"ba not_match b.*", false, false},
- {"ba not_match b[a-z]", false, false},
- {"b0 not_match b[a-z]", true, false},
- {"b0a not_match b[a-z]", true, false},
- {"b0a not_match b[a-z]+", true, false},
- {"b0a not_match b[a-z0-9]+", false, false},
- {"bac not_match b[a-z]{2}", false, false},
- }
-
- for i, test := range tests {
- str := strings.Fields(test.condition)
- ifCond, err := newIfCond(str[0], str[1], str[2])
- if err != nil {
- if !test.shouldErr {
- t.Error(err)
- }
- continue
- }
- isTrue := ifCond.True(nil)
- if isTrue != test.isTrue {
- t.Errorf("Test %d: '%s' expected %v found %v", i, test.condition, test.isTrue, isTrue)
- }
- }
-
- invalidOperators := []string{"ss", "and", "if"}
- for _, op := range invalidOperators {
- _, err := newIfCond("a", op, "b")
- if err == nil {
- t.Errorf("Invalid operator %v used, expected error.", op)
- }
- }
-
- replaceTests := []struct {
- url string
- condition string
- isTrue bool
- }{
- {"/home", "{uri} match /home", true},
- {"/hom", "{uri} match /home", false},
- {"/hom", "{uri} starts_with /home", false},
- {"/hom", "{uri} starts_with /h", true},
- {"/home/.hiddenfile", `{uri} match \/\.(.*)`, true},
- {"/home/.hiddendir/afile", `{uri} match \/\.(.*)`, true},
- }
-
- for i, test := range replaceTests {
- r, err := http.NewRequest("GET", test.url, nil)
- if err != nil {
- t.Errorf("Test %d: failed to create request: %v", i, err)
- continue
- }
- ctx := context.WithValue(r.Context(), OriginalURLCtxKey, *r.URL)
- r = r.WithContext(ctx)
- str := strings.Fields(test.condition)
- ifCond, err := newIfCond(str[0], str[1], str[2])
- if err != nil {
- t.Errorf("Test %d: failed to create 'if' condition %v", i, err)
- continue
- }
- isTrue := ifCond.True(r)
- if isTrue != test.isTrue {
- t.Errorf("Test %v: expected %v found %v", i, test.isTrue, isTrue)
- continue
- }
- }
-}
-
-func TestIfMatcher(t *testing.T) {
- tests := []struct {
- conditions []string
- isOr bool
- isTrue bool
- }{
- {
- []string{
- "a is a",
- "b is b",
- "c is c",
- },
- false,
- true,
- },
- {
- []string{
- "a is b",
- "b is c",
- "c is c",
- },
- true,
- true,
- },
- {
- []string{
- "a is a",
- "b is a",
- "c is c",
- },
- false,
- false,
- },
- {
- []string{
- "a is b",
- "b is c",
- "c is a",
- },
- true,
- false,
- },
- {
- []string{},
- false,
- true,
- },
- {
- []string{},
- true,
- false,
- },
- }
-
- for i, test := range tests {
- matcher := IfMatcher{isOr: test.isOr}
- for _, condition := range test.conditions {
- str := strings.Fields(condition)
- ifCond, err := newIfCond(str[0], str[1], str[2])
- if err != nil {
- t.Error(err)
- }
- matcher.ifs = append(matcher.ifs, ifCond)
- }
- isTrue := matcher.Match(nil)
- if isTrue != test.isTrue {
- t.Errorf("Test %d: expected %v found %v", i, test.isTrue, isTrue)
- }
- }
-}
-
-func TestSetupIfMatcher(t *testing.T) {
- rex_b, _ := regexp.Compile("b")
- tests := []struct {
- input string
- shouldErr bool
- expected IfMatcher
- }{
- {`test {
- if a match b
- }`, false, IfMatcher{
- ifs: []ifCond{
- {a: "a", op: "match", b: "b", neg: false, rex: rex_b},
- },
- }},
- {`test {
- if a match b
- if_op or
- }`, false, IfMatcher{
- ifs: []ifCond{
- {a: "a", op: "match", b: "b", neg: false, rex: rex_b},
- },
- isOr: true,
- }},
- {`test {
- if a match
- }`, true, IfMatcher{},
- },
- {`test {
- if a isn't b
- }`, true, IfMatcher{},
- },
- {`test {
- if a match b c
- }`, true, IfMatcher{},
- },
- {`test {
- if goal has go
- if cook not_has go
- }`, false, IfMatcher{
- ifs: []ifCond{
- {a: "goal", op: "has", b: "go", neg: false},
- {a: "cook", op: "has", b: "go", neg: true},
- },
- }},
- {`test {
- if goal has go
- if cook not_has go
- if_op and
- }`, false, IfMatcher{
- ifs: []ifCond{
- {a: "goal", op: "has", b: "go", neg: false},
- {a: "cook", op: "has", b: "go", neg: true},
- },
- }},
- {`test {
- if goal has go
- if cook not_has go
- if_op not
- }`, true, IfMatcher{},
- },
- }
-
- for i, test := range tests {
- c := caddy.NewTestController("http", test.input)
- c.Next()
-
- matcher, err := SetupIfMatcher(c)
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- } else if err != nil && test.shouldErr {
- continue
- }
-
- test_if, ok := matcher.(IfMatcher)
- if !ok {
- t.Error("RequestMatcher should be of type IfMatcher")
- }
-
- if err != nil {
- t.Errorf("Expected no error, but got: %v", err)
- }
-
- if len(test_if.ifs) != len(test.expected.ifs) {
- t.Errorf("Test %d: Expected %d ifConditions, found %v", i,
- len(test.expected.ifs), len(test_if.ifs))
- }
-
- for j, if_c := range test_if.ifs {
- expected_c := test.expected.ifs[j]
-
- if if_c.a != expected_c.a {
- t.Errorf("Test %d, ifCond %d: Expected A=%s, got %s",
- i, j, if_c.a, expected_c.a)
- }
-
- if if_c.op != expected_c.op {
- t.Errorf("Test %d, ifCond %d: Expected Op=%s, got %s",
- i, j, if_c.op, expected_c.op)
- }
-
- if if_c.b != expected_c.b {
- t.Errorf("Test %d, ifCond %d: Expected B=%s, got %s",
- i, j, if_c.b, expected_c.b)
- }
-
- if if_c.neg != expected_c.neg {
- t.Errorf("Test %d, ifCond %d: Expected Neg=%v, got %v",
- i, j, if_c.neg, expected_c.neg)
- }
-
- if expected_c.rex != nil && if_c.rex == nil {
- t.Errorf("Test %d, ifCond %d: Expected Rex=%v, got ",
- i, j, expected_c.rex)
- }
-
- if expected_c.rex == nil && if_c.rex != nil {
- t.Errorf("Test %d, ifCond %d: Expected Rex=, got %v",
- i, j, if_c.rex)
- }
-
- if expected_c.rex != nil && if_c.rex != nil {
- if if_c.rex.String() != expected_c.rex.String() {
- t.Errorf("Test %d, ifCond %d: Expected Rex=%v, got %v",
- i, j, if_c.rex, expected_c.rex)
- }
- }
- }
- }
-}
-
-func TestIfMatcherKeyword(t *testing.T) {
- tests := []struct {
- keyword string
- expected bool
- }{
- {"if", true},
- {"ifs", false},
- {"tls", false},
- {"http", false},
- {"if_op", true},
- {"if_type", false},
- {"if_cond", false},
- }
-
- for i, test := range tests {
- c := caddy.NewTestController("http", test.keyword)
- c.Next()
- valid := IfMatcherKeyword(c)
- if valid != test.expected {
- t.Errorf("Test %d: expected %v found %v", i, test.expected, valid)
- }
- }
-}
diff --git a/caddyhttp/httpserver/error.go b/caddyhttp/httpserver/error.go
deleted file mode 100644
index 2fbd486cfd1..00000000000
--- a/caddyhttp/httpserver/error.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package httpserver
-
-import (
- "fmt"
-)
-
-var (
- _ error = NonHijackerError{}
- _ error = NonFlusherError{}
- _ error = NonCloseNotifierError{}
- _ error = NonPusherError{}
-)
-
-// NonHijackerError is more descriptive error caused by a non hijacker
-type NonHijackerError struct {
- // underlying type which doesn't implement Hijack
- Underlying interface{}
-}
-
-// Implement Error
-func (h NonHijackerError) Error() string {
- return fmt.Sprintf("%T is not a hijacker", h.Underlying)
-}
-
-// NonFlusherError is more descriptive error caused by a non flusher
-type NonFlusherError struct {
- // underlying type which doesn't implement Flush
- Underlying interface{}
-}
-
-// Implement Error
-func (f NonFlusherError) Error() string {
- return fmt.Sprintf("%T is not a flusher", f.Underlying)
-}
-
-// NonCloseNotifierError is more descriptive error caused by a non closeNotifier
-type NonCloseNotifierError struct {
- // underlying type which doesn't implement CloseNotify
- Underlying interface{}
-}
-
-// Implement Error
-func (c NonCloseNotifierError) Error() string {
- return fmt.Sprintf("%T is not a closeNotifier", c.Underlying)
-}
-
-// NonPusherError is more descriptive error caused by a non pusher
-type NonPusherError struct {
- // underlying type which doesn't implement pusher
- Underlying interface{}
-}
-
-// Implement Error
-func (c NonPusherError) Error() string {
- return fmt.Sprintf("%T is not a pusher", c.Underlying)
-}
diff --git a/caddyhttp/httpserver/https.go b/caddyhttp/httpserver/https.go
deleted file mode 100644
index c35c93ab165..00000000000
--- a/caddyhttp/httpserver/https.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package httpserver
-
-import (
- "fmt"
- "net"
- "net/http"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddytls"
-)
-
-func activateHTTPS(cctx caddy.Context) error {
- operatorPresent := !caddy.Started()
-
- if !caddy.Quiet && operatorPresent {
- fmt.Print("Activating privacy features...")
- }
-
- ctx := cctx.(*httpContext)
-
- // pre-screen each config and earmark the ones that qualify for managed TLS
- markQualifiedForAutoHTTPS(ctx.siteConfigs)
-
- // place certificates and keys on disk
- for _, c := range ctx.siteConfigs {
- if c.TLS.OnDemand {
- continue // obtain these certificates on-demand instead
- }
- err := c.TLS.ObtainCert(c.TLS.Hostname, operatorPresent)
- if err != nil {
- return err
- }
- }
-
- // update TLS configurations
- err := enableAutoHTTPS(ctx.siteConfigs, true)
- if err != nil {
- return err
- }
-
- // set up redirects
- ctx.siteConfigs = makePlaintextRedirects(ctx.siteConfigs)
-
- // renew all relevant certificates that need renewal. this is important
- // to do right away so we guarantee that renewals aren't missed, and
- // also the user can respond to any potential errors that occur.
- // (skip if upgrading, because the parent process is likely already listening
- // on the ports we'd need to do ACME before we finish starting; parent process
- // already running renewal ticker, so renewal won't be missed anyway.)
- if !caddy.IsUpgrade() {
- err = caddytls.RenewManagedCertificates(true)
- if err != nil {
- return err
- }
- }
-
- if !caddy.Quiet && operatorPresent {
- fmt.Println(" done.")
- }
-
- return nil
-}
-
-// markQualifiedForAutoHTTPS scans each config and, if it
-// qualifies for managed TLS, it sets the Managed field of
-// the TLS config to true.
-func markQualifiedForAutoHTTPS(configs []*SiteConfig) {
- for _, cfg := range configs {
- if caddytls.QualifiesForManagedTLS(cfg) && cfg.Addr.Scheme != "http" {
- cfg.TLS.Managed = true
- }
- }
-}
-
-// enableAutoHTTPS configures each config to use TLS according to default settings.
-// It will only change configs that are marked as managed but not on-demand, and
-// assumes that certificates and keys are already on disk. If loadCertificates is
-// true, the certificates will be loaded from disk into the cache for this process
-// to use. If false, TLS will still be enabled and configured with default settings,
-// but no certificates will be parsed loaded into the cache, and the returned error
-// value will always be nil.
-func enableAutoHTTPS(configs []*SiteConfig, loadCertificates bool) error {
- for _, cfg := range configs {
- if cfg == nil || cfg.TLS == nil || !cfg.TLS.Managed || cfg.TLS.OnDemand {
- continue
- }
- cfg.TLS.Enabled = true
- cfg.Addr.Scheme = "https"
- if loadCertificates && caddytls.HostQualifies(cfg.Addr.Host) {
- _, err := cfg.TLS.CacheManagedCertificate(cfg.Addr.Host)
- if err != nil {
- return err
- }
- }
-
- // Make sure any config values not explicitly set are set to default
- caddytls.SetDefaultTLSParams(cfg.TLS)
-
- // Set default port of 443 if not explicitly set
- if cfg.Addr.Port == "" &&
- cfg.TLS.Enabled &&
- (!cfg.TLS.Manual || cfg.TLS.OnDemand) &&
- cfg.Addr.Host != "localhost" {
- cfg.Addr.Port = HTTPSPort
- }
- }
- return nil
-}
-
-// makePlaintextRedirects sets up redirects from port 80 to the relevant HTTPS
-// hosts. You must pass in all configs, not just configs that qualify, since
-// we must know whether the same host already exists on port 80, and those would
-// not be in a list of configs that qualify for automatic HTTPS. This function will
-// only set up redirects for configs that qualify. It returns the updated list of
-// all configs.
-func makePlaintextRedirects(allConfigs []*SiteConfig) []*SiteConfig {
- for i, cfg := range allConfigs {
- if cfg.TLS.Managed &&
- !hostHasOtherPort(allConfigs, i, HTTPPort) &&
- (cfg.Addr.Port == HTTPSPort || !hostHasOtherPort(allConfigs, i, HTTPSPort)) {
- allConfigs = append(allConfigs, redirPlaintextHost(cfg))
- }
- }
- return allConfigs
-}
-
-// hostHasOtherPort returns true if there is another config in the list with the same
-// hostname that has port otherPort, or false otherwise. All the configs are checked
-// against the hostname of allConfigs[thisConfigIdx].
-func hostHasOtherPort(allConfigs []*SiteConfig, thisConfigIdx int, otherPort string) bool {
- for i, otherCfg := range allConfigs {
- if i == thisConfigIdx {
- continue // has to be a config OTHER than the one we're comparing against
- }
- if otherCfg.Addr.Host == allConfigs[thisConfigIdx].Addr.Host &&
- otherCfg.Addr.Port == otherPort {
- return true
- }
- }
- return false
-}
-
-// redirPlaintextHost returns a new plaintext HTTP configuration for
-// a virtualHost that simply redirects to cfg, which is assumed to
-// be the HTTPS configuration. The returned configuration is set
-// to listen on HTTPPort. The TLS field of cfg must not be nil.
-func redirPlaintextHost(cfg *SiteConfig) *SiteConfig {
- redirPort := cfg.Addr.Port
- if redirPort == DefaultHTTPSPort {
- redirPort = "" // default port is redundant
- }
- redirMiddleware := func(next Handler) Handler {
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- // Construct the URL to which to redirect. Note that the Host in a request might
- // contain a port, but we just need the hostname; we'll set the port if needed.
- toURL := "https://"
- requestHost, _, err := net.SplitHostPort(r.Host)
- if err != nil {
- requestHost = r.Host // Host did not contain a port; great
- }
- if redirPort == "" {
- toURL += requestHost
- } else {
- toURL += net.JoinHostPort(requestHost, redirPort)
- }
- toURL += r.URL.RequestURI()
-
- w.Header().Set("Connection", "close")
- http.Redirect(w, r, toURL, http.StatusMovedPermanently)
- return 0, nil
- })
- }
- host := cfg.Addr.Host
- port := HTTPPort
- addr := net.JoinHostPort(host, port)
- return &SiteConfig{
- Addr: Address{Original: addr, Host: host, Port: port},
- ListenHost: cfg.ListenHost,
- middleware: []Middleware{redirMiddleware},
- TLS: &caddytls.Config{AltHTTPPort: cfg.TLS.AltHTTPPort, AltTLSSNIPort: cfg.TLS.AltTLSSNIPort},
- Timeouts: cfg.Timeouts,
- }
-}
diff --git a/caddyhttp/httpserver/https_test.go b/caddyhttp/httpserver/https_test.go
deleted file mode 100644
index 82a12700269..00000000000
--- a/caddyhttp/httpserver/https_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package httpserver
-
-import (
- "fmt"
- "net"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/mholt/caddy/caddytls"
-)
-
-func TestRedirPlaintextHost(t *testing.T) {
- for i, testcase := range []struct {
- Host string // used for the site config
- Port string
- ListenHost string
- RequestHost string // if different from Host
- }{
- {
- Host: "foohost",
- },
- {
- Host: "foohost",
- Port: "80",
- },
- {
- Host: "foohost",
- Port: "1234",
- },
- {
- Host: "foohost",
- ListenHost: "93.184.216.34",
- },
- {
- Host: "foohost",
- Port: "1234",
- ListenHost: "93.184.216.34",
- },
- {
- Host: "foohost",
- Port: "443", // since this is the default HTTPS port, should not be included in Location value
- },
- {
- Host: "*.example.com",
- RequestHost: "foo.example.com",
- },
- {
- Host: "*.example.com",
- Port: "1234",
- RequestHost: "foo.example.com:1234",
- },
- } {
- cfg := redirPlaintextHost(&SiteConfig{
- Addr: Address{
- Host: testcase.Host,
- Port: testcase.Port,
- },
- ListenHost: testcase.ListenHost,
- TLS: new(caddytls.Config),
- })
-
- // Check host and port
- if actual, expected := cfg.Addr.Host, testcase.Host; actual != expected {
- t.Errorf("Test %d: Expected redir config to have host %s but got %s", i, expected, actual)
- }
- if actual, expected := cfg.ListenHost, testcase.ListenHost; actual != expected {
- t.Errorf("Test %d: Expected redir config to have bindhost %s but got %s", i, expected, actual)
- }
- if actual, expected := cfg.Addr.Port, HTTPPort; actual != expected {
- t.Errorf("Test %d: Expected redir config to have port '%s' but got '%s'", i, expected, actual)
- }
-
- // Make sure redirect handler is set up properly
- if cfg.middleware == nil || len(cfg.middleware) != 1 {
- t.Fatalf("Test %d: Redir config middleware not set up properly; got: %#v", i, cfg.middleware)
- }
-
- handler := cfg.middleware[0](nil)
-
- // Check redirect for correctness, first by inspecting error and status code
- requestHost := testcase.Host // hostname of request might be different than in config (e.g. wildcards)
- if testcase.RequestHost != "" {
- requestHost = testcase.RequestHost
- }
- rec := httptest.NewRecorder()
- req, err := http.NewRequest("GET", "http://"+requestHost+"/bar?q=1", nil)
- if err != nil {
- t.Fatalf("Test %d: %v", i, err)
- }
- status, err := handler.ServeHTTP(rec, req)
- if status != 0 {
- t.Errorf("Test %d: Expected status return to be 0, but was %d", i, status)
- }
- if err != nil {
- t.Errorf("Test %d: Expected returned error to be nil, but was %v", i, err)
- }
- if rec.Code != http.StatusMovedPermanently {
- t.Errorf("Test %d: Expected status %d but got %d", http.StatusMovedPermanently, i, rec.Code)
- }
-
- // Now check the Location value. It should mirror the hostname and port of the request
- // unless the port is redundant, in which case it should be dropped.
- locationHost, _, err := net.SplitHostPort(requestHost)
- if err != nil {
- locationHost = requestHost
- }
- expectedLoc := fmt.Sprintf("https://%s/bar?q=1", locationHost)
- if testcase.Port != "" && testcase.Port != DefaultHTTPSPort {
- expectedLoc = fmt.Sprintf("https://%s:%s/bar?q=1", locationHost, testcase.Port)
- }
- if got, want := rec.Header().Get("Location"), expectedLoc; got != want {
- t.Errorf("Test %d: Expected Location: '%s' but got '%s'", i, want, got)
- }
- }
-}
-
-func TestHostHasOtherPort(t *testing.T) {
- configs := []*SiteConfig{
- {Addr: Address{Host: "example.com", Port: "80"}},
- {Addr: Address{Host: "sub1.example.com", Port: "80"}},
- {Addr: Address{Host: "sub1.example.com", Port: "443"}},
- }
-
- if hostHasOtherPort(configs, 0, "80") {
- t.Errorf(`Expected hostHasOtherPort(configs, 0, "80") to be false, but got true`)
- }
- if hostHasOtherPort(configs, 0, "443") {
- t.Errorf(`Expected hostHasOtherPort(configs, 0, "443") to be false, but got true`)
- }
- if !hostHasOtherPort(configs, 1, "443") {
- t.Errorf(`Expected hostHasOtherPort(configs, 1, "443") to be true, but got false`)
- }
-}
-
-func TestMakePlaintextRedirects(t *testing.T) {
- configs := []*SiteConfig{
- // Happy path = standard redirect from 80 to 443
- {Addr: Address{Host: "example.com"}, TLS: &caddytls.Config{Managed: true}},
-
- // Host on port 80 already defined; don't change it (no redirect)
- {Addr: Address{Host: "sub1.example.com", Port: "80", Scheme: "http"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "sub1.example.com"}, TLS: &caddytls.Config{Managed: true}},
-
- // Redirect from port 80 to port 5000 in this case
- {Addr: Address{Host: "sub2.example.com", Port: "5000"}, TLS: &caddytls.Config{Managed: true}},
-
- // Can redirect from 80 to either 443 or 5001, but choose 443
- {Addr: Address{Host: "sub3.example.com", Port: "443"}, TLS: &caddytls.Config{Managed: true}},
- {Addr: Address{Host: "sub3.example.com", Port: "5001", Scheme: "https"}, TLS: &caddytls.Config{Managed: true}},
- }
-
- result := makePlaintextRedirects(configs)
- expectedRedirCount := 3
-
- if len(result) != len(configs)+expectedRedirCount {
- t.Errorf("Expected %d redirect(s) to be added, but got %d",
- expectedRedirCount, len(result)-len(configs))
- }
-}
-
-func TestEnableAutoHTTPS(t *testing.T) {
- configs := []*SiteConfig{
- {Addr: Address{Host: "example.com"}, TLS: &caddytls.Config{Managed: true}},
- {}, // not managed - no changes!
- }
-
- enableAutoHTTPS(configs, false)
-
- if !configs[0].TLS.Enabled {
- t.Errorf("Expected config 0 to have TLS.Enabled == true, but it was false")
- }
- if configs[0].Addr.Scheme != "https" {
- t.Errorf("Expected config 0 to have Addr.Scheme == \"https\", but it was \"%s\"",
- configs[0].Addr.Scheme)
- }
- if configs[1].TLS != nil && configs[1].TLS.Enabled {
- t.Errorf("Expected config 1 to have TLS.Enabled == false, but it was true")
- }
-}
-
-func TestMarkQualifiedForAutoHTTPS(t *testing.T) {
- // TODO: caddytls.TestQualifiesForManagedTLS and this test share nearly the same config list...
- configs := []*SiteConfig{
- {Addr: Address{Host: ""}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "localhost"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "123.44.3.21"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com"}, TLS: &caddytls.Config{Manual: true}},
- {Addr: Address{Host: "example.com"}, TLS: &caddytls.Config{ACMEEmail: "off"}},
- {Addr: Address{Host: "example.com"}, TLS: &caddytls.Config{ACMEEmail: "foo@bar.com"}},
- {Addr: Address{Host: "example.com", Scheme: "http"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com", Port: "80"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com", Port: "1234"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com", Scheme: "https"}, TLS: new(caddytls.Config)},
- {Addr: Address{Host: "example.com", Port: "80", Scheme: "https"}, TLS: new(caddytls.Config)},
- }
- expectedManagedCount := 4
-
- markQualifiedForAutoHTTPS(configs)
-
- count := 0
- for _, cfg := range configs {
- if cfg.TLS.Managed {
- count++
- }
- }
-
- if count != expectedManagedCount {
- t.Errorf("Expected %d managed configs, but got %d", expectedManagedCount, count)
- }
-}
diff --git a/caddyhttp/httpserver/logger.go b/caddyhttp/httpserver/logger.go
deleted file mode 100644
index 29e888a5cf0..00000000000
--- a/caddyhttp/httpserver/logger.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package httpserver
-
-import (
- "bytes"
- "io"
- "log"
- "os"
- "strings"
- "sync"
-
- "github.com/hashicorp/go-syslog"
- "github.com/mholt/caddy"
-)
-
-var remoteSyslogPrefixes = map[string]string{
- "syslog+tcp://": "tcp",
- "syslog+udp://": "udp",
- "syslog://": "udp",
-}
-
-// Logger is shared between errors and log plugins and supports both logging to
-// a file (with an optional file roller), local and remote syslog servers.
-type Logger struct {
- Output string
- *log.Logger
- Roller *LogRoller
- writer io.Writer
- fileMu *sync.RWMutex
-}
-
-// NewTestLogger creates logger suitable for testing purposes
-func NewTestLogger(buffer *bytes.Buffer) *Logger {
- return &Logger{
- Logger: log.New(buffer, "", 0),
- fileMu: new(sync.RWMutex),
- }
-}
-
-// Println wraps underlying logger with mutex
-func (l Logger) Println(args ...interface{}) {
- l.fileMu.RLock()
- l.Logger.Println(args...)
- l.fileMu.RUnlock()
-}
-
-// Printf wraps underlying logger with mutex
-func (l Logger) Printf(format string, args ...interface{}) {
- l.fileMu.RLock()
- l.Logger.Printf(format, args...)
- l.fileMu.RUnlock()
-}
-
-// Attach binds logger Start and Close functions to
-// controller's OnStartup and OnShutdown hooks.
-func (l *Logger) Attach(controller *caddy.Controller) {
- if controller != nil {
- // Opens file or connect to local/remote syslog
- controller.OnStartup(l.Start)
-
- // Closes file or disconnects from local/remote syslog
- controller.OnShutdown(l.Close)
- }
-}
-
-type syslogAddress struct {
- network string
- address string
-}
-
-func parseSyslogAddress(location string) *syslogAddress {
- for prefix, network := range remoteSyslogPrefixes {
- if strings.HasPrefix(location, prefix) {
- return &syslogAddress{
- network: network,
- address: strings.TrimPrefix(location, prefix),
- }
- }
- }
-
- return nil
-}
-
-// Start initializes logger opening files or local/remote syslog connections
-func (l *Logger) Start() error {
- // initialize mutex on start
- l.fileMu = new(sync.RWMutex)
-
- var err error
-
-selectwriter:
- switch l.Output {
- case "", "stderr":
- l.writer = os.Stderr
- case "stdout":
- l.writer = os.Stdout
- case "syslog":
- l.writer, err = gsyslog.NewLogger(gsyslog.LOG_ERR, "LOCAL0", "caddy")
- if err != nil {
- return err
- }
- default:
- if address := parseSyslogAddress(l.Output); address != nil {
- l.writer, err = gsyslog.DialLogger(address.network, address.address, gsyslog.LOG_ERR, "LOCAL0", "caddy")
-
- if err != nil {
- return err
- }
-
- break selectwriter
- }
-
- var file *os.File
-
- file, err = os.OpenFile(l.Output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
- if err != nil {
- return err
- }
-
- if l.Roller != nil {
- file.Close()
- l.Roller.Filename = l.Output
- l.writer = l.Roller.GetLogWriter()
- } else {
- l.writer = file
- }
- }
-
- l.Logger = log.New(l.writer, "", 0)
-
- return nil
-
-}
-
-// Close closes open log files or connections to syslog.
-func (l *Logger) Close() error {
- // don't close stdout or stderr
- if l.writer == os.Stdout || l.writer == os.Stderr {
- return nil
- }
-
- // Will close local/remote syslog connections too :)
- if closer, ok := l.writer.(io.WriteCloser); ok {
- l.fileMu.Lock()
- err := closer.Close()
- l.fileMu.Unlock()
- return err
- }
-
- return nil
-}
diff --git a/caddyhttp/httpserver/logger_test.go b/caddyhttp/httpserver/logger_test.go
deleted file mode 100644
index 0ef08c939e9..00000000000
--- a/caddyhttp/httpserver/logger_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-//+build linux darwin
-
-package httpserver
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "testing"
-
- syslog "gopkg.in/mcuadros/go-syslog.v2"
- "gopkg.in/mcuadros/go-syslog.v2/format"
-)
-
-func TestLoggingToStdout(t *testing.T) {
- testCases := []struct {
- Output string
- ExpectedOutput string
- }{
- {
- Output: "stdout",
- ExpectedOutput: "Hello world logged to stdout",
- },
- }
-
- for i, testCase := range testCases {
- output := captureStdout(func() {
- logger := Logger{Output: testCase.Output, fileMu: new(sync.RWMutex)}
-
- if err := logger.Start(); err != nil {
- t.Fatalf("Got unexpected error: %v", err)
- }
-
- logger.Println(testCase.ExpectedOutput)
- })
-
- if !strings.Contains(output, testCase.ExpectedOutput) {
- t.Fatalf("Test #%d: Expected output to contain: %s, got: %s", i, testCase.ExpectedOutput, output)
- }
- }
-}
-
-func TestLoggingToStderr(t *testing.T) {
-
- testCases := []struct {
- Output string
- ExpectedOutput string
- }{
- {
- Output: "stderr",
- ExpectedOutput: "Hello world logged to stderr",
- },
- {
- Output: "",
- ExpectedOutput: "Hello world logged to stderr #2",
- },
- }
-
- for i, testCase := range testCases {
- output := captureStderr(func() {
- logger := Logger{Output: testCase.Output, fileMu: new(sync.RWMutex)}
-
- if err := logger.Start(); err != nil {
- t.Fatalf("Got unexpected error: %v", err)
- }
-
- logger.Println(testCase.ExpectedOutput)
- })
-
- if !strings.Contains(output, testCase.ExpectedOutput) {
- t.Fatalf("Test #%d: Expected output to contain: %s, got: %s", i, testCase.ExpectedOutput, output)
- }
- }
-}
-
-func TestLoggingToFile(t *testing.T) {
- file := filepath.Join(os.TempDir(), "access.log")
- expectedOutput := "Hello world written to file"
-
- logger := Logger{Output: file}
-
- if err := logger.Start(); err != nil {
- t.Fatalf("Got unexpected error during logger start: %v", err)
- }
-
- logger.Print(expectedOutput)
-
- content, err := ioutil.ReadFile(file)
- if err != nil {
- t.Fatalf("Could not read log file content: %v", err)
- }
-
- if !bytes.Contains(content, []byte(expectedOutput)) {
- t.Fatalf("Expected log file to contain: %s, got: %s", expectedOutput, string(content))
- }
-
- os.Remove(file)
-}
-
-func TestLoggingToSyslog(t *testing.T) {
-
- testCases := []struct {
- Output string
- ExpectedOutput string
- }{
- {
- Output: "syslog://127.0.0.1:5660",
- ExpectedOutput: "Hello world! Test #1 over tcp",
- },
- {
- Output: "syslog+tcp://127.0.0.1:5661",
- ExpectedOutput: "Hello world! Test #2 over tcp",
- },
- {
- Output: "syslog+udp://127.0.0.1:5662",
- ExpectedOutput: "Hello world! Test #3 over udp",
- },
- }
-
- for i, testCase := range testCases {
-
- ch := make(chan format.LogParts, 256)
- server, err := bootServer(testCase.Output, ch)
- defer server.Kill()
-
- if err != nil {
- t.Errorf("Test #%d: expected no error during syslog server boot, got: %v", i, err)
- }
-
- logger := Logger{Output: testCase.Output, fileMu: new(sync.RWMutex)}
-
- if err := logger.Start(); err != nil {
- t.Errorf("Test #%d: expected no error during logger start, got: %v", i, err)
- }
-
- defer logger.Close()
-
- logger.Print(testCase.ExpectedOutput)
-
- actual := <-ch
-
- if content, ok := actual["content"].(string); ok {
- if !strings.Contains(content, testCase.ExpectedOutput) {
- t.Errorf("Test #%d: expected server to capture content: %s, but got: %s", i, testCase.ExpectedOutput, content)
- }
- } else {
- t.Errorf("Test #%d: expected server to capture content but got: %v", i, actual)
- }
- }
-}
-
-func bootServer(location string, ch chan format.LogParts) (*syslog.Server, error) {
- address := parseSyslogAddress(location)
-
- if address == nil {
- return nil, fmt.Errorf("Could not parse syslog address: %s", location)
- }
-
- server := syslog.NewServer()
- server.SetFormat(syslog.Automatic)
-
- switch address.network {
- case "tcp":
- server.ListenTCP(address.address)
- case "udp":
- server.ListenUDP(address.address)
- }
-
- server.SetHandler(syslog.NewChannelHandler(ch))
-
- if err := server.Boot(); err != nil {
- return nil, err
- }
-
- return server, nil
-}
-
-func captureStdout(f func()) string {
- original := os.Stdout
- r, w, _ := os.Pipe()
-
- os.Stdout = w
-
- f()
-
- w.Close()
-
- written, _ := ioutil.ReadAll(r)
- os.Stdout = original
-
- return string(written)
-}
-
-func captureStderr(f func()) string {
- original := os.Stderr
- r, w, _ := os.Pipe()
-
- os.Stderr = w
-
- f()
-
- w.Close()
-
- written, _ := ioutil.ReadAll(r)
- os.Stderr = original
-
- return string(written)
-}
diff --git a/caddyhttp/httpserver/middleware.go b/caddyhttp/httpserver/middleware.go
deleted file mode 100644
index 48e45a1491b..00000000000
--- a/caddyhttp/httpserver/middleware.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package httpserver
-
-import (
- "fmt"
- "net/http"
- "os"
- "path"
- "time"
-
- "github.com/mholt/caddy"
-)
-
-func init() {
- initCaseSettings()
-}
-
-type (
- // Middleware is the middle layer which represents the traditional
- // idea of middleware: it chains one Handler to the next by being
- // passed the next Handler in the chain.
- Middleware func(Handler) Handler
-
- // ListenerMiddleware is similar to the Middleware type, except it
- // chains one net.Listener to the next.
- ListenerMiddleware func(caddy.Listener) caddy.Listener
-
- // Handler is like http.Handler except ServeHTTP may return a status
- // code and/or error.
- //
- // If ServeHTTP writes the response header, it should return a status
- // code of 0. This signals to other handlers before it that the response
- // is already handled, and that they should not write to it also. Keep
- // in mind that writing to the response body writes the header, too.
- //
- // If ServeHTTP encounters an error, it should return the error value
- // so it can be logged by designated error-handling middleware.
- //
- // If writing a response after calling the next ServeHTTP method, the
- // returned status code SHOULD be used when writing the response.
- //
- // If handling errors after calling the next ServeHTTP method, the
- // returned error value SHOULD be logged or handled accordingly.
- //
- // Otherwise, return values should be propagated down the middleware
- // chain by returning them unchanged.
- Handler interface {
- ServeHTTP(http.ResponseWriter, *http.Request) (int, error)
- }
-
- // HandlerFunc is a convenience type like http.HandlerFunc, except
- // ServeHTTP returns a status code and an error. See Handler
- // documentation for more information.
- HandlerFunc func(http.ResponseWriter, *http.Request) (int, error)
-
- // RequestMatcher checks to see if current request should be handled
- // by underlying handler.
- RequestMatcher interface {
- Match(r *http.Request) bool
- }
-
- // HandlerConfig is a middleware configuration.
- // This makes it possible for middlewares to have a common
- // configuration interface.
- //
- // TODO The long term plan is to get all middleware implement this
- // interface for configurations.
- HandlerConfig interface {
- RequestMatcher
- BasePath() string
- }
-
- // ConfigSelector selects a configuration.
- ConfigSelector []HandlerConfig
-)
-
-// ServeHTTP implements the Handler interface.
-func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- return f(w, r)
-}
-
-// Select selects a Config.
-// This chooses the config with the longest length.
-func (c ConfigSelector) Select(r *http.Request) (config HandlerConfig) {
- for i := range c {
- if !c[i].Match(r) {
- continue
- }
- if config == nil || len(c[i].BasePath()) > len(config.BasePath()) {
- config = c[i]
- }
- }
- return config
-}
-
-// IndexFile looks for a file in /root/fpath/indexFile for each string
-// in indexFiles. If an index file is found, it returns the root-relative
-// path to the file and true. If no index file is found, empty string
-// and false is returned. fpath must end in a forward slash '/'
-// otherwise no index files will be tried (directory paths must end
-// in a forward slash according to HTTP).
-//
-// All paths passed into and returned from this function use '/' as the
-// path separator, just like URLs. IndexFle handles path manipulation
-// internally for systems that use different path separators.
-func IndexFile(root http.FileSystem, fpath string, indexFiles []string) (string, bool) {
- if fpath[len(fpath)-1] != '/' || root == nil {
- return "", false
- }
- for _, indexFile := range indexFiles {
- // func (http.FileSystem).Open wants all paths separated by "/",
- // regardless of operating system convention, so use
- // path.Join instead of filepath.Join
- fp := path.Join(fpath, indexFile)
- f, err := root.Open(fp)
- if err == nil {
- f.Close()
- return fp, true
- }
- }
- return "", false
-}
-
-// SetLastModifiedHeader checks if the provided modTime is valid and if it is sets it
-// as a Last-Modified header to the ResponseWriter. If the modTime is in the future
-// the current time is used instead.
-func SetLastModifiedHeader(w http.ResponseWriter, modTime time.Time) {
- if modTime.IsZero() || modTime.Equal(time.Unix(0, 0)) {
- // the time does not appear to be valid. Don't put it in the response
- return
- }
-
- // RFC 2616 - Section 14.29 - Last-Modified:
- // An origin server MUST NOT send a Last-Modified date which is later than the
- // server's time of message origination. In such cases, where the resource's last
- // modification would indicate some time in the future, the server MUST replace
- // that date with the message origination date.
- now := currentTime()
- if modTime.After(now) {
- modTime = now
- }
-
- w.Header().Set("Last-Modified", modTime.UTC().Format(http.TimeFormat))
-}
-
-// CaseSensitivePath determines if paths should be case sensitive.
-// This is configurable via CASE_SENSITIVE_PATH environment variable.
-var CaseSensitivePath = true
-
-const caseSensitivePathEnv = "CASE_SENSITIVE_PATH"
-
-// initCaseSettings loads case sensitivity config from environment variable.
-//
-// This could have been in init, but init cannot be called from tests.
-func initCaseSettings() {
- switch os.Getenv(caseSensitivePathEnv) {
- case "0", "false":
- CaseSensitivePath = false
- default:
- CaseSensitivePath = true
- }
-}
-
-// MergeRequestMatchers merges multiple RequestMatchers into one.
-// This allows a middleware to use multiple RequestMatchers.
-func MergeRequestMatchers(matchers ...RequestMatcher) RequestMatcher {
- return requestMatchers(matchers)
-}
-
-type requestMatchers []RequestMatcher
-
-// Match satisfies RequestMatcher interface.
-func (m requestMatchers) Match(r *http.Request) bool {
- for _, matcher := range m {
- if !matcher.Match(r) {
- return false
- }
- }
- return true
-}
-
-// currentTime, as it is defined here, returns time.Now().
-// It's defined as a variable for mocking time in tests.
-var currentTime = func() time.Time { return time.Now() }
-
-// EmptyNext is a no-op function that can be passed into
-// Middleware functions so that the assignment to the
-// Next field of the Handler can be tested.
-//
-// Used primarily for testing but needs to be exported so
-// plugins can use this as a convenience.
-var EmptyNext = HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) { return 0, nil })
-
-// SameNext does a pointer comparison between next1 and next2.
-//
-// Used primarily for testing but needs to be exported so
-// plugins can use this as a convenience.
-func SameNext(next1, next2 Handler) bool {
- return fmt.Sprintf("%v", next1) == fmt.Sprintf("%v", next2)
-}
-
-// Context key constants.
-const (
- // RemoteUserCtxKey is the key for the remote user of the request, if any (basicauth).
- RemoteUserCtxKey caddy.CtxKey = "remote_user"
-
- // MitmCtxKey is the key for the result of MITM detection
- MitmCtxKey caddy.CtxKey = "mitm"
-
- // RequestIDCtxKey is the key for the U4 UUID value
- RequestIDCtxKey caddy.CtxKey = "request_id"
-)
diff --git a/caddyhttp/httpserver/middleware_test.go b/caddyhttp/httpserver/middleware_test.go
deleted file mode 100644
index 2f75c8bb9da..00000000000
--- a/caddyhttp/httpserver/middleware_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package httpserver
-
-import (
- "os"
- "testing"
-)
-
-func TestPathCaseSensitivity(t *testing.T) {
- tests := []struct {
- basePath string
- path string
- caseSensitive bool
- expected bool
- }{
- {"/", "/file", true, true},
- {"/a", "/file", true, false},
- {"/f", "/file", true, true},
- {"/f", "/File", true, false},
- {"/f", "/File", false, true},
- {"/file", "/file", true, true},
- {"/file", "/file", false, true},
- {"/files", "/file", false, false},
- {"/files", "/file", true, false},
- {"/folder", "/folder/file.txt", true, true},
- {"/folders", "/folder/file.txt", true, false},
- {"/folder", "/Folder/file.txt", false, true},
- {"/folders", "/Folder/file.txt", false, false},
- }
-
- for i, test := range tests {
- CaseSensitivePath = test.caseSensitive
- valid := Path(test.path).Matches(test.basePath)
- if test.expected != valid {
- t.Errorf("Test %d: Expected %v, found %v", i, test.expected, valid)
- }
- }
-}
-
-func TestPathCaseSensitiveEnv(t *testing.T) {
- tests := []struct {
- envValue string
- expected bool
- }{
- {"1", true},
- {"0", false},
- {"false", false},
- {"true", true},
- {"", true},
- }
-
- for i, test := range tests {
- os.Setenv(caseSensitivePathEnv, test.envValue)
- initCaseSettings()
- if test.expected != CaseSensitivePath {
- t.Errorf("Test %d: Expected %v, found %v", i, test.expected, CaseSensitivePath)
- }
- }
-}
diff --git a/caddyhttp/httpserver/mitm.go b/caddyhttp/httpserver/mitm.go
deleted file mode 100644
index d058f37c892..00000000000
--- a/caddyhttp/httpserver/mitm.go
+++ /dev/null
@@ -1,748 +0,0 @@
-package httpserver
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "sync"
-)
-
-// tlsHandler is a http.Handler that will inject a value
-// into the request context indicating if the TLS
-// connection is likely being intercepted.
-type tlsHandler struct {
- next http.Handler
- listener *tlsHelloListener
- closeOnMITM bool // whether to close connection on MITM; TODO: expose through new directive
-}
-
-// ServeHTTP checks the User-Agent. For the four main browsers (Chrome,
-// Edge, Firefox, and Safari) indicated by the User-Agent, the properties
-// of the TLS Client Hello will be compared. The context value "mitm" will
-// be set to a value indicating if it is likely that the underlying TLS
-// connection is being intercepted.
-//
-// Note that due to Microsoft's decision to intentionally make IE/Edge
-// user agents obscure (and look like other browsers), this may offer
-// less accuracy for IE/Edge clients.
-//
-// This MITM detection capability is based on research done by Durumeric,
-// Halderman, et. al. in "The Security Impact of HTTPS Interception" (NDSS '17):
-// https://jhalderm.com/pub/papers/interception-ndss17.pdf
-func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if h.listener == nil {
- h.next.ServeHTTP(w, r)
- return
- }
-
- h.listener.helloInfosMu.RLock()
- info := h.listener.helloInfos[r.RemoteAddr]
- h.listener.helloInfosMu.RUnlock()
-
- ua := r.Header.Get("User-Agent")
-
- var checked, mitm bool
- if r.Header.Get("X-BlueCoat-Via") != "" || // Blue Coat (masks User-Agent header to generic values)
- r.Header.Get("X-FCCKV2") != "" || // Fortinet
- info.advertisesHeartbeatSupport() { // no major browsers have ever implemented Heartbeat
- checked = true
- mitm = true
- } else if strings.Contains(ua, "Edge") || strings.Contains(ua, "MSIE") ||
- strings.Contains(ua, "Trident") {
- checked = true
- mitm = !info.looksLikeEdge()
- } else if strings.Contains(ua, "Chrome") {
- checked = true
- mitm = !info.looksLikeChrome()
- } else if strings.Contains(ua, "CriOS") {
- // Chrome on iOS sometimes uses iOS-provided TLS stack (which looks exactly like Safari)
- // but for connections that don't render a web page (favicon, etc.) it uses its own...
- checked = true
- mitm = !info.looksLikeChrome() && !info.looksLikeSafari()
- } else if strings.Contains(ua, "Firefox") {
- checked = true
- if strings.Contains(ua, "Windows") {
- ver := getVersion(ua, "Firefox")
- if ver == 45.0 || ver == 52.0 {
- mitm = !info.looksLikeTor()
- } else {
- mitm = !info.looksLikeFirefox()
- }
- } else {
- mitm = !info.looksLikeFirefox()
- }
- } else if strings.Contains(ua, "Safari") {
- checked = true
- mitm = !info.looksLikeSafari()
- }
-
- if checked {
- r = r.WithContext(context.WithValue(r.Context(), MitmCtxKey, mitm))
- }
-
- if mitm && h.closeOnMITM {
- // TODO: This termination might need to happen later in the middleware
- // chain in order to be picked up by the log directive, in case the site
- // owner still wants to log this event. It'll probably require a new
- // directive. If this feature is useful, we can finish implementing this.
- r.Close = true
- return
- }
-
- h.next.ServeHTTP(w, r)
-}
-
-// getVersion returns a (possibly simplified) representation of the version string
-// from a UserAgent string. It returns a float, so it can represent major and minor
-// versions; the rest of the version is just tacked on behind the decimal point.
-// The purpose of this is to stay simple while allowing for basic, fast comparisons.
-// If the version for softwareName is not found in ua, -1 is returned.
-func getVersion(ua, softwareName string) float64 {
- search := softwareName + "/"
- start := strings.Index(ua, search)
- if start < 0 {
- return -1
- }
- start += len(search)
- end := strings.Index(ua[start:], " ")
- if end < 0 {
- end = len(ua)
- } else {
- end += start
- }
- strVer := strings.Replace(ua[start:end], "-", "", -1)
- firstDot := strings.Index(strVer, ".")
- if firstDot >= 0 {
- strVer = strVer[:firstDot+1] + strings.Replace(strVer[firstDot+1:], ".", "", -1)
- }
- ver, err := strconv.ParseFloat(strVer, 64)
- if err != nil {
- return -1
- }
- return ver
-}
-
-// clientHelloConn reads the ClientHello
-// and stores it in the attached listener.
-type clientHelloConn struct {
- net.Conn
- listener *tlsHelloListener
- readHello bool // whether ClientHello has been read
- buf *bytes.Buffer
-}
-
-// Read reads from c.Conn (by letting the standard library
-// do the reading off the wire), with the exception of
-// getting a copy of the ClientHello so it can parse it.
-func (c *clientHelloConn) Read(b []byte) (n int, err error) {
- // if we've already read the ClientHello, pass thru
- if c.readHello {
- return c.Conn.Read(b)
- }
-
- // we let the standard lib read off the wire for us, and
- // tee that into our buffer so we can read the ClientHello
- tee := io.TeeReader(c.Conn, c.buf)
- n, err = tee.Read(b)
- if err != nil {
- return
- }
- if c.buf.Len() < 5 {
- return // need to read more bytes for header
- }
-
- // read the header bytes
- hdr := make([]byte, 5)
- _, err = io.ReadFull(c.buf, hdr)
- if err != nil {
- return // this would be highly unusual and sad
- }
-
- // get length of the ClientHello message and read it
- length := int(uint16(hdr[3])<<8 | uint16(hdr[4]))
- if c.buf.Len() < length {
- return // need to read more bytes
- }
- hello := make([]byte, length)
- _, err = io.ReadFull(c.buf, hello)
- if err != nil {
- return
- }
- bufpool.Put(c.buf) // buffer no longer needed
-
- // parse the ClientHello and store it in the map
- rawParsed := parseRawClientHello(hello)
- c.listener.helloInfosMu.Lock()
- c.listener.helloInfos[c.Conn.RemoteAddr().String()] = rawParsed
- c.listener.helloInfosMu.Unlock()
-
- c.readHello = true
- return
-}
-
-// parseRawClientHello parses data which contains the raw
-// TLS Client Hello message. It extracts relevant information
-// into info. Any error reading the Client Hello (such as
-// insufficient length or invalid length values) results in
-// a silent error and an incomplete info struct, since there
-// is no good way to handle an error like this during Accept().
-// The data is expected to contain the whole ClientHello and
-// ONLY the ClientHello.
-//
-// The majority of this code is borrowed from the Go standard
-// library, which is (c) The Go Authors. It has been modified
-// to fit this use case.
-func parseRawClientHello(data []byte) (info rawHelloInfo) {
- if len(data) < 42 {
- return
- }
- sessionIDLen := int(data[38])
- if sessionIDLen > 32 || len(data) < 39+sessionIDLen {
- return
- }
- data = data[39+sessionIDLen:]
- if len(data) < 2 {
- return
- }
- // cipherSuiteLen is the number of bytes of cipher suite numbers. Since
- // they are uint16s, the number must be even.
- cipherSuiteLen := int(data[0])<<8 | int(data[1])
- if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen {
- return
- }
- numCipherSuites := cipherSuiteLen / 2
- // read in the cipher suites
- info.cipherSuites = make([]uint16, numCipherSuites)
- for i := 0; i < numCipherSuites; i++ {
- info.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i])
- }
- data = data[2+cipherSuiteLen:]
- if len(data) < 1 {
- return
- }
- // read in the compression methods
- compressionMethodsLen := int(data[0])
- if len(data) < 1+compressionMethodsLen {
- return
- }
- info.compressionMethods = data[1 : 1+compressionMethodsLen]
-
- data = data[1+compressionMethodsLen:]
-
- // ClientHello is optionally followed by extension data
- if len(data) < 2 {
- return
- }
- extensionsLength := int(data[0])<<8 | int(data[1])
- data = data[2:]
- if extensionsLength != len(data) {
- return
- }
-
- // read in each extension, and extract any relevant information
- // from extensions we care about
- for len(data) != 0 {
- if len(data) < 4 {
- return
- }
- extension := uint16(data[0])<<8 | uint16(data[1])
- length := int(data[2])<<8 | int(data[3])
- data = data[4:]
- if len(data) < length {
- return
- }
-
- // record that the client advertised support for this extension
- info.extensions = append(info.extensions, extension)
-
- switch extension {
- case extensionSupportedCurves:
- // http://tools.ietf.org/html/rfc4492#section-5.5.1
- if length < 2 {
- return
- }
- l := int(data[0])<<8 | int(data[1])
- if l%2 == 1 || length != l+2 {
- return
- }
- numCurves := l / 2
- info.curves = make([]tls.CurveID, numCurves)
- d := data[2:]
- for i := 0; i < numCurves; i++ {
- info.curves[i] = tls.CurveID(d[0])<<8 | tls.CurveID(d[1])
- d = d[2:]
- }
- case extensionSupportedPoints:
- // http://tools.ietf.org/html/rfc4492#section-5.5.2
- if length < 1 {
- return
- }
- l := int(data[0])
- if length != l+1 {
- return
- }
- info.points = make([]uint8, l)
- copy(info.points, data[1:])
- }
-
- data = data[length:]
- }
-
- return
-}
-
-// newTLSListener returns a new tlsHelloListener that wraps ln.
-func newTLSListener(ln net.Listener, config *tls.Config) *tlsHelloListener {
- return &tlsHelloListener{
- Listener: ln,
- config: config,
- helloInfos: make(map[string]rawHelloInfo),
- }
-}
-
-// tlsHelloListener is a TLS listener that is specially designed
-// to read the ClientHello manually so we can extract necessary
-// information from it. Each ClientHello message is mapped by
-// the remote address of the client, which must be removed when
-// the connection is closed (use ConnState).
-type tlsHelloListener struct {
- net.Listener
- config *tls.Config
- helloInfos map[string]rawHelloInfo
- helloInfosMu sync.RWMutex
-}
-
-// Accept waits for and returns the next connection to the listener.
-// After it accepts the underlying connection, it reads the
-// ClientHello message and stores the parsed data into a map on l.
-func (l *tlsHelloListener) Accept() (net.Conn, error) {
- conn, err := l.Listener.Accept()
- if err != nil {
- return nil, err
- }
- buf := bufpool.Get().(*bytes.Buffer)
- buf.Reset()
- helloConn := &clientHelloConn{Conn: conn, listener: l, buf: buf}
- return tls.Server(helloConn, l.config), nil
-}
-
-// rawHelloInfo contains the "raw" data parsed from the TLS
-// Client Hello. No interpretation is done on the raw data.
-//
-// The methods on this type implement heuristics described
-// by Durumeric, Halderman, et. al. in
-// "The Security Impact of HTTPS Interception":
-// https://jhalderm.com/pub/papers/interception-ndss17.pdf
-type rawHelloInfo struct {
- cipherSuites []uint16
- extensions []uint16
- compressionMethods []byte
- curves []tls.CurveID
- points []uint8
-}
-
-// advertisesHeartbeatSupport returns true if info indicates
-// that the client supports the Heartbeat extension.
-func (info rawHelloInfo) advertisesHeartbeatSupport() bool {
- for _, ext := range info.extensions {
- if ext == extensionHeartbeat {
- return true
- }
- }
- return false
-}
-
-// looksLikeFirefox returns true if info looks like a handshake
-// from a modern version of Firefox.
-func (info rawHelloInfo) looksLikeFirefox() bool {
- // "To determine whether a Firefox session has been
- // intercepted, we check for the presence and order
- // of extensions, cipher suites, elliptic curves,
- // EC point formats, and handshake compression methods." (early 2016)
-
- // We check for the presence and order of the extensions.
- // Note: Sometimes 0x15 (21, padding) is present, sometimes not.
- // Note: Firefox 51+ does not advertise 0x3374 (13172, NPN).
- // Note: Firefox doesn't advertise 0x0 (0, SNI) when connecting to IP addresses.
- // Note: Firefox 55+ doesn't appear to advertise 0xFF03 (65283, short headers). It used to be between 5 and 13.
- // Note: Firefox on Fedora (or RedHat) doesn't include ECC suites because of patent liability.
- requiredExtensionsOrder := []uint16{23, 65281, 10, 11, 35, 16, 5, 13}
- if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
- return false
- }
-
- // We check for both presence of curves and their ordering.
- requiredCurves := []tls.CurveID{29, 23, 24, 25}
- if len(info.curves) < len(requiredCurves) {
- return false
- }
- for i := range requiredCurves {
- if info.curves[i] != requiredCurves[i] {
- return false
- }
- }
- if len(info.curves) > len(requiredCurves) {
- // newer Firefox (55 Nightly?) may have additional curves at end of list
- allowedCurves := []tls.CurveID{256, 257}
- for i := range allowedCurves {
- if info.curves[len(requiredCurves)+i] != allowedCurves[i] {
- return false
- }
- }
- }
-
- if hasGreaseCiphers(info.cipherSuites) {
- return false
- }
-
- // We check for order of cipher suites but not presence, since
- // according to the paper, cipher suites may be not be added
- // or reordered by the user, but they may be disabled.
- expectedCipherSuiteOrder := []uint16{
- TLS_AES_128_GCM_SHA256, // 0x1301
- TLS_CHACHA20_POLY1305_SHA256, // 0x1303
- TLS_AES_256_GCM_SHA384, // 0x1302
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // 0xcca9
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // 0xcca8
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA, // 0x33
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA, // 0x39
- tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
- tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa
- }
- return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, false)
-}
-
-// looksLikeChrome returns true if info looks like a handshake
-// from a modern version of Chrome.
-func (info rawHelloInfo) looksLikeChrome() bool {
- // "We check for ciphers and extensions that Chrome is known
- // to not support, but do not check for the inclusion of
- // specific ciphers or extensions, nor do we validate their
- // order. When appropriate, we check the presence and order
- // of elliptic curves, compression methods, and EC point formats." (early 2016)
-
- // Not in Chrome 56, but present in Safari 10 (Feb. 2017):
- // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 (0xc024)
- // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 (0xc023)
- // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a)
- // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009)
- // TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 (0xc028)
- // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 (0xc027)
- // TLS_RSA_WITH_AES_256_CBC_SHA256 (0x3d)
- // TLS_RSA_WITH_AES_128_CBC_SHA256 (0x3c)
-
- // Not in Chrome 56, but present in Firefox 51 (Feb. 2017):
- // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a)
- // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009)
- // TLS_DHE_RSA_WITH_AES_128_CBC_SHA (0x33)
- // TLS_DHE_RSA_WITH_AES_256_CBC_SHA (0x39)
-
- // Selected ciphers present in Chrome mobile (Feb. 2017):
- // 0xc00a, 0xc014, 0xc009, 0x9c, 0x9d, 0x2f, 0x35, 0xa
-
- chromeCipherExclusions := map[uint16]struct{}{
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384: {}, // 0xc024
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: {}, // 0xc023
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384: {}, // 0xc028
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: {}, // 0xc027
- TLS_RSA_WITH_AES_256_CBC_SHA256: {}, // 0x3d
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256: {}, // 0x3c
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA: {}, // 0x33
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA: {}, // 0x39
- }
- for _, ext := range info.cipherSuites {
- if _, ok := chromeCipherExclusions[ext]; ok {
- return false
- }
- }
-
- // Chrome does not include curve 25 (CurveP521) (as of Chrome 56, Feb. 2017).
- for _, curve := range info.curves {
- if curve == 25 {
- return false
- }
- }
-
- if !hasGreaseCiphers(info.cipherSuites) {
- return false
- }
-
- return true
-}
-
-// looksLikeEdge returns true if info looks like a handshake
-// from a modern version of MS Edge.
-func (info rawHelloInfo) looksLikeEdge() bool {
- // "SChannel connections can by uniquely identified because SChannel
- // is the only TLS library we tested that includes the OCSP status
- // request extension before the supported groups and EC point formats
- // extensions." (early 2016)
- //
- // More specifically, the OCSP status request extension appears
- // *directly* before the other two extensions, which occur in that
- // order. (I contacted the authors for clarification and verified it.)
- for i, ext := range info.extensions {
- if ext == extensionOCSPStatusRequest {
- if len(info.extensions) <= i+2 {
- return false
- }
- if info.extensions[i+1] != extensionSupportedCurves ||
- info.extensions[i+2] != extensionSupportedPoints {
- return false
- }
- }
- }
-
- for _, cs := range info.cipherSuites {
- // As of Feb. 2017, Edge does not have 0xff, but Avast adds it
- if cs == scsvRenegotiation {
- return false
- }
- // Edge and modern IE do not have 0x4 or 0x5, but Blue Coat does
- if cs == TLS_RSA_WITH_RC4_128_MD5 || cs == tls.TLS_RSA_WITH_RC4_128_SHA {
- return false
- }
- }
-
- if hasGreaseCiphers(info.cipherSuites) {
- return false
- }
-
- return true
-}
-
-// looksLikeSafari returns true if info looks like a handshake
-// from a modern version of MS Safari.
-func (info rawHelloInfo) looksLikeSafari() bool {
- // "One unique aspect of Secure Transport is that it includes
- // the TLS_EMPTY_RENEGOTIATION_INFO_SCSV (0xff) cipher first,
- // whereas the other libraries we investigated include the
- // cipher last. Similar to Microsoft, Apple has changed
- // TLS behavior in minor OS updates, which are not indicated
- // in the HTTP User-Agent header. We allow for any of the
- // updates when validating handshakes, and we check for the
- // presence and ordering of ciphers, extensions, elliptic
- // curves, and compression methods." (early 2016)
-
- // Note that any C lib (e.g. curl) compiled on macOS
- // will probably use Secure Transport which will also
- // share the TLS handshake characteristics of Safari.
-
- // We check for the presence and order of the extensions.
- requiredExtensionsOrder := []uint16{10, 11, 13, 13172, 16, 5, 18, 23}
- if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
- // Safari on iOS 11 (beta) uses different set/ordering of extensions
- requiredExtensionsOrderiOS11 := []uint16{65281, 0, 23, 13, 5, 13172, 18, 16, 11, 10}
- if !assertPresenceAndOrdering(requiredExtensionsOrderiOS11, info.extensions, true) {
- return false
- }
- } else {
- // For these versions of Safari, expect TLS_EMPTY_RENEGOTIATION_INFO_SCSV first.
- if len(info.cipherSuites) < 1 {
- return false
- }
- if info.cipherSuites[0] != scsvRenegotiation {
- return false
- }
- }
-
- if hasGreaseCiphers(info.cipherSuites) {
- return false
- }
-
- // We check for order and presence of cipher suites
- expectedCipherSuiteOrder := []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, // 0xc024
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // 0xc023
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, // 0xc028
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // 0xc027
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // 0x9d
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // 0x9c
- TLS_RSA_WITH_AES_256_CBC_SHA256, // 0x3d
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // 0x3c
- tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
- tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
- }
- return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, true)
-}
-
-// looksLikeTor returns true if the info looks like a ClientHello from Tor browser
-// (based on Firefox).
-func (info rawHelloInfo) looksLikeTor() bool {
- requiredExtensionsOrder := []uint16{10, 11, 16, 5, 13}
- if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
- return false
- }
-
- // check for session tickets support; Tor doesn't support them to prevent tracking
- for _, ext := range info.extensions {
- if ext == 35 {
- return false
- }
- }
-
- // We check for both presence of curves and their ordering, including
- // an optional curve at the beginning (for Tor based on Firefox 52)
- infoCurves := info.curves
- if len(info.curves) == 4 {
- if info.curves[0] != 29 {
- return false
- }
- infoCurves = info.curves[1:]
- }
- requiredCurves := []tls.CurveID{23, 24, 25}
- if len(infoCurves) < len(requiredCurves) {
- return false
- }
- for i := range requiredCurves {
- if infoCurves[i] != requiredCurves[i] {
- return false
- }
- }
-
- if hasGreaseCiphers(info.cipherSuites) {
- return false
- }
-
- // We check for order of cipher suites but not presence, since
- // according to the paper, cipher suites may be not be added
- // or reordered by the user, but they may be disabled.
- expectedCipherSuiteOrder := []uint16{
- TLS_AES_128_GCM_SHA256, // 0x1301
- TLS_CHACHA20_POLY1305_SHA256, // 0x1303
- TLS_AES_256_GCM_SHA384, // 0x1302
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // 0xcca9
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // 0xcca8
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA, // 0x33
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA, // 0x39
- tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
- tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa
- }
- return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, false)
-}
-
-// assertPresenceAndOrdering will return true if candidateList contains
-// the items in requiredItems in the same order as requiredItems.
-//
-// If requiredIsSubset is true, then all items in requiredItems must be
-// present in candidateList. If requiredIsSubset is false, then requiredItems
-// may contain items that are not in candidateList.
-//
-// In all cases, the order of requiredItems is enforced.
-func assertPresenceAndOrdering(requiredItems, candidateList []uint16, requiredIsSubset bool) bool {
- superset := requiredItems
- subset := candidateList
- if requiredIsSubset {
- superset = candidateList
- subset = requiredItems
- }
-
- var j int
- for _, item := range subset {
- var found bool
- for j < len(superset) {
- if superset[j] == item {
- found = true
- break
- }
- j++
- }
- if j == len(superset) && !found {
- return false
- }
- }
- return true
-}
-
-func hasGreaseCiphers(cipherSuites []uint16) bool {
- for _, cipher := range cipherSuites {
- if _, ok := greaseCiphers[cipher]; ok {
- return true
- }
- }
- return false
-}
-
-// pool buffers so we can reuse allocations over time
-var bufpool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-var greaseCiphers = map[uint16]struct{}{
- 0x0A0A: {},
- 0x1A1A: {},
- 0x2A2A: {},
- 0x3A3A: {},
- 0x4A4A: {},
- 0x5A5A: {},
- 0x6A6A: {},
- 0x7A7A: {},
- 0x8A8A: {},
- 0x9A9A: {},
- 0xAAAA: {},
- 0xBABA: {},
- 0xCACA: {},
- 0xDADA: {},
- 0xEAEA: {},
- 0xFAFA: {},
-}
-
-// Define variables used for TLS communication
-const (
- extensionOCSPStatusRequest = 5
- extensionSupportedCurves = 10 // also called "SupportedGroups"
- extensionSupportedPoints = 11
- extensionHeartbeat = 15
-
- scsvRenegotiation = 0xff
-
- // cipher suites missing from the crypto/tls package,
- // in no particular order here
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xc024
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xc028
- TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x3d
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x33
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x39
- TLS_RSA_WITH_RC4_128_MD5 = 0x4
-
- // new PSK ciphers introduced by TLS 1.3, not (yet) in crypto/tls
- // https://tlswg.github.io/tls13-spec/#rfc.appendix.A.4)
- TLS_AES_128_GCM_SHA256 = 0x1301
- TLS_AES_256_GCM_SHA384 = 0x1302
- TLS_CHACHA20_POLY1305_SHA256 = 0x1303
- TLS_AES_128_CCM_SHA256 = 0x1304
- TLS_AES_128_CCM_8_SHA256 = 0x1305
-)
diff --git a/caddyhttp/httpserver/mitm_test.go b/caddyhttp/httpserver/mitm_test.go
deleted file mode 100644
index 82df34af623..00000000000
--- a/caddyhttp/httpserver/mitm_test.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package httpserver
-
-import (
- "crypto/tls"
- "encoding/hex"
- "net/http"
- "net/http/httptest"
- "reflect"
- "testing"
-)
-
-func TestParseClientHello(t *testing.T) {
- for i, test := range []struct {
- inputHex string
- expected rawHelloInfo
- }{
- {
- // curl 7.51.0 (x86_64-apple-darwin16.0) libcurl/7.51.0 SecureTransport zlib/1.2.8
- inputHex: `010000a6030358a28c73a71bdfc1f09dee13fecdc58805dcce42ac44254df548f14645f7dc2c00004400ffc02cc02bc024c023c00ac009c008c030c02fc028c027c014c013c012009f009e006b0067003900330016009d009c003d003c0035002f000a00af00ae008d008c008b01000039000a00080006001700180019000b00020100000d00120010040102010501060104030203050306030005000501000000000012000000170000`,
- expected: rawHelloInfo{
- cipherSuites: []uint16{255, 49196, 49195, 49188, 49187, 49162, 49161, 49160, 49200, 49199, 49192, 49191, 49172, 49171, 49170, 159, 158, 107, 103, 57, 51, 22, 157, 156, 61, 60, 53, 47, 10, 175, 174, 141, 140, 139},
- extensions: []uint16{10, 11, 13, 5, 18, 23},
- compressionMethods: []byte{0},
- curves: []tls.CurveID{23, 24, 25},
- points: []uint8{0},
- },
- },
- {
- // Chrome 56
- inputHex: `010000c003031dae75222dae1433a5a283ddcde8ddabaefbf16d84f250eee6fdff48cdfff8a00000201a1ac02bc02fc02cc030cca9cca8cc14cc13c013c014009c009d002f0035000a010000777a7a0000ff010001000000000e000c0000096c6f63616c686f73740017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a0008aaaa001d001700182a2a000100`,
- expected: rawHelloInfo{
- cipherSuites: []uint16{6682, 49195, 49199, 49196, 49200, 52393, 52392, 52244, 52243, 49171, 49172, 156, 157, 47, 53, 10},
- extensions: []uint16{31354, 65281, 0, 23, 35, 13, 5, 18, 16, 30032, 11, 10, 10794},
- compressionMethods: []byte{0},
- curves: []tls.CurveID{43690, 29, 23, 24},
- points: []uint8{0},
- },
- },
- {
- // Firefox 51
- inputHex: `010000bd030375f9022fc3a6562467f3540d68013b2d0b961979de6129e944efe0b35531323500001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a010000760000000e000c0000096c6f63616c686f737400170000ff01000100000a000a0008001d001700180019000b00020100002300000010000e000c02683208687474702f312e31000500050100000000ff030000000d0020001e040305030603020308040805080604010501060102010402050206020202`,
- expected: rawHelloInfo{
- cipherSuites: []uint16{49195, 49199, 52393, 52392, 49196, 49200, 49162, 49161, 49171, 49172, 51, 57, 47, 53, 10},
- extensions: []uint16{0, 23, 65281, 10, 11, 35, 16, 5, 65283, 13},
- compressionMethods: []byte{0},
- curves: []tls.CurveID{29, 23, 24, 25},
- points: []uint8{0},
- },
- },
- {
- // openssl s_client (OpenSSL 0.9.8zh 14 Jan 2016)
- inputHex: `0100012b03035d385236b8ca7b7946fa0336f164e76bf821ed90e8de26d97cc677671b6f36380000acc030c02cc028c024c014c00a00a500a300a1009f006b006a0069006800390038003700360088008700860085c032c02ec02ac026c00fc005009d003d00350084c02fc02bc027c023c013c00900a400a200a0009e00670040003f003e0033003200310030009a0099009800970045004400430042c031c02dc029c025c00ec004009c003c002f009600410007c011c007c00cc00200050004c012c008001600130010000dc00dc003000a00ff0201000055000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230000000d0020001e060106020603050105020503040104020403030103020303020102020203000f000101`,
- expected: rawHelloInfo{
- cipherSuites: []uint16{49200, 49196, 49192, 49188, 49172, 49162, 165, 163, 161, 159, 107, 106, 105, 104, 57, 56, 55, 54, 136, 135, 134, 133, 49202, 49198, 49194, 49190, 49167, 49157, 157, 61, 53, 132, 49199, 49195, 49191, 49187, 49171, 49161, 164, 162, 160, 158, 103, 64, 63, 62, 51, 50, 49, 48, 154, 153, 152, 151, 69, 68, 67, 66, 49201, 49197, 49193, 49189, 49166, 49156, 156, 60, 47, 150, 65, 7, 49169, 49159, 49164, 49154, 5, 4, 49170, 49160, 22, 19, 16, 13, 49165, 49155, 10, 255},
- extensions: []uint16{11, 10, 35, 13, 15},
- compressionMethods: []byte{1, 0},
- curves: []tls.CurveID{23, 25, 28, 27, 24, 26, 22, 14, 13, 11, 12, 9, 10},
- points: []uint8{0, 1, 2},
- },
- },
- } {
- data, err := hex.DecodeString(test.inputHex)
- if err != nil {
- t.Fatalf("Test %d: Could not decode hex data: %v", i, err)
- }
- actual := parseRawClientHello(data)
- if !reflect.DeepEqual(test.expected, actual) {
- t.Errorf("Test %d: Expected %+v; got %+v", i, test.expected, actual)
- }
- }
-}
-
-func TestHeuristicFunctionsAndHandler(t *testing.T) {
- // To test the heuristics, we assemble a collection of real
- // ClientHello messages from various TLS clients, both genuine
- // and intercepted. Please be sure to hex-encode them and
- // document the User-Agent associated with the connection
- // as well as any intercepting proxy as thoroughly as possible.
- //
- // If the TLS client used is not an HTTP client (e.g. s_client),
- // you can leave the userAgent blank, but please use a comment
- // to document crucial missing information such as client name,
- // version, and platform, maybe even the date you collected
- // the sample! Please group similar clients together, ordered
- // by version for convenience.
-
- // clientHello pairs a User-Agent string to its ClientHello message.
- type clientHello struct {
- userAgent string
- helloHex string // do NOT include the header, just the ClientHello message
- interception bool // if test case shows an interception, set to true
- reqHeaders http.Header // if the request should set any headers to imitate a browser or proxy
- }
-
- // clientHellos groups samples of true (real) ClientHellos by the
- // name of the browser that produced them. We limit the set of
- // browsers to those we are programmed to protect, as well as a
- // category for "Other" which contains real ClientHello messages
- // from clients that we do not recognize, which may be used to
- // test or imitate interception scenarios.
- //
- // Please group similar clients and order by version for convenience
- // when adding to the test cases.
- clientHellos := map[string][]clientHello{
- "Chrome": {
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
- helloHex: `010000c003031dae75222dae1433a5a283ddcde8ddabaefbf16d84f250eee6fdff48cdfff8a00000201a1ac02bc02fc02cc030cca9cca8cc14cc13c013c014009c009d002f0035000a010000777a7a0000ff010001000000000e000c0000096c6f63616c686f73740017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a0008aaaa001d001700182a2a000100`,
- interception: false,
- },
- {
- // Chrome on iOS will use iOS' TLS stack for requests that load
- // the web page (apparently required by the dev ToS) but will use its
- // own TLS stack for everything else, it seems.
-
- // Chrome on iOS
- userAgent: "Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/14A456 Safari/602.1",
- helloHex: `010000de030358b062c509b21410a6496b5a82bfec74436cdecebe8ea1da29799939bbd3c17200002c00ffc02cc02bc024c023c00ac009c008c030c02fc028c027c014c013c012009d009c003d003c0035002f000a0100008900000014001200000f66696e6572706978656c732e636f6d000a00080006001700180019000b00020100000d00120010040102010501060104030203050306033374000000100030002e0268320568322d31360568322d31350568322d313408737064792f332e3106737064792f3308687474702f312e310005000501000000000012000000170000`,
- },
- {
- // Chrome on iOS (requesting favicon)
- userAgent: "Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/14A456 Safari/602.1",
- helloHex: `010000c20303863eb64788e3b9638c261300318411cbdd8f09576d58eec1e744b6ce944f574f0000208a8acca9cca8cc14cc13c02bc02fc02cc030c013c014009c009d002f0035000a01000079baba0000ff0100010000000014001200000f66696e6572706978656c732e636f6d0017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e31000b00020100000a000a00083a3a001d001700184a4a000100`,
- },
- {
- userAgent: "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
- helloHex: `010000c603036f717a88212c3e9e41940f82c42acb3473e0e4a64e8f52d9af33d34e972e08a30000206a6ac02bc02fc02cc030cca9cca8cc14cc13c013c014009c009d002f0035000a0100007d7a7a0000ff0100010000000014001200000f66696e6572706978656c732e636f6d0017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a00087a7a001d001700188a8a000100`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
- helloHex: `010001fc030383141d213d1bf069171843489faf808028d282c9828e1ba87637c863833c730720a67e76e152f4b704523b72317ef4587e231f02e2395e0ecac6be9f28c35e6ce600208a8ac02bc02fc02cc030cca9cca8cc14cc13c013c014009c009d002f0035000a010001931a1a0000ff0100010000000014001200000f66696e6572706978656c732e636f6d00170000002300785e85429bf1764f33111cd3ad5d1c56d765976fd962b49dbecbb6f7865e2a8d8536ad854f1fa99a8bbbf998814fee54a63a0bf162869d2bba37e9778304e7c4140825718e191b574c6246a0611de6447bdd80417f83ff9d9b7124069a9f74b90394ecb89bec5f6a1a67c1b89e50b8674782f53dd51807651a000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a00081a1a001d001700182a2a0001000015009a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
- helloHex: `010000c203034166c97e2016046e0c88ad867c410d0aee470f4d9b4ec8fe41a751d2a6348e3100001c4a4ac02bc02fc02cc030cca9cca8c013c014009c009d002f0035000a0100007dcaca0000ff0100010000000014001200000f66696e6572706978656c732e636f6d0017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a00086a6a001d001700187a7a000100`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
- helloHex: `010000c203037741795e73cd5b4949f79a0dc9cccc8b006e4c0ec324f965c6fe9f0833909f0100001c7a7ac02bc02fc02cc030cca9cca8c013c014009c009d002f0035000a0100007d7a7a0000ff0100010000000014001200000f66696e6572706978656c732e636f6d0017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a00084a4a001d001700185a5a000100`,
- interception: false,
- },
- },
- "Firefox": {
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:51.0) Gecko/20100101 Firefox/51.0",
- helloHex: `010000bd030375f9022fc3a6562467f3540d68013b2d0b961979de6129e944efe0b35531323500001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a010000760000000e000c0000096c6f63616c686f737400170000ff01000100000a000a0008001d001700180019000b00020100002300000010000e000c02683208687474702f312e31000500050100000000ff030000000d0020001e040305030603020308040805080604010501060102010402050206020202`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:53.0) Gecko/20100101 Firefox/53.0",
- helloHex: `010001fc0303c99d54ae0628bbb9fea3833a4244c6a712cac9d7738f4930b8b9d8e2f6bd578220f7936cedb48907981c9292fb08ceee6f59bd6fddb3d4271ccd7c12380c5038ab001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a01000195001500af000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000c0000096c6f63616c686f737400170000ff01000100000a000a0008001d001700180019000b000201000023007886da2d41843ff42131b856982c19a545837b70e604325423a817d925e9d95bd084737682cea6b804dfb7cbe336a3b27b8d520d57520c29cfe5f4f3d3236183b84b05c18f0ca30bf598111e390086fea00d9631f1f78527277eb7838b86e73c4e5d15b55d086b1a4a8aa29f12a55126c6274bcd499bbeb23a0010000e000c02683208687474702f312e31000500050100000000000d0018001604030503060308040805080604010501060102030201`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:53.0) Gecko/20100101 Firefox/53.0",
- helloHex: `010000b1030365d899820b999245d571c2f7d6b850f63ad931d3c68ceb9cf5a508421a871dc500001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a0100006a0000000e000c0000096c6f63616c686f737400170000ff01000100000a000a0008001d001700180019000b00020100002300000010000e000c02683208687474702f312e31000500050100000000000d0018001604030503060308040805080604010501060102030201`,
- interception: false,
- },
- {
- // this was a Nightly release at the time
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0",
- helloHex: `010001fc030331e380b7d12018e1202ef3327607203df5c5732b4fa5ab5abaf0b60034c2fb662070c836b9b89123e37f4f1074d152df438fa8ee8a0f89b036fd952f4fcc0b994f001c130113031302c02bc02fcca9cca8c02cc030c013c014002f0035000a0100019700000014001200000f63616464797365727665722e636f6d00170000ff01000100000a000e000c001d00170018001901000101000b0002010000230078c97e7716a041e2ea824571bef26a3dff2bf50a883cd15d904ab2d17deb514f6e0a079ee7c212c000178387ffafc2e530b6df6662f570aae134330f13c458a0eaad5a96a9696f572110918740b15db1143d19aaaa706942030b433a7e6150f62b443c0564e5b8f7ee9577bf3bf7faec8c67425b648ab54d880010000e000c02683208687474702f312e310005000501000000000028006b0069001d0020aee6e596155ee6f79f943e81ceabe0979d27fbbb8b9189ccb2ebc75226351f32001700410421875a44e510decac11ef1d7cfddd4dfe105d5cd3a2d42fba03ebde23e51e8ce65bda1b48be82d4848d1db2bfce68e94092e925a9ce0dbf5df35479558108489002b0009087f12030303020301000d0018001604030503060308040805080604010501060102030201002d000201010015002500000000000000000000000000000000000000000000000000000000000000000000000000`,
- interception: false,
- },
- {
- // Firefox on Fedora (RedHat) doesn't include ECC ciphers because of patent liabilities
- userAgent: "Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0",
- helloHex: `010000b70303f5280b74d617d42e39fd77b78a2b537b1d7787ce4fcbcf3604c9fbcd677c6c5500001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a0100007000000014001200000f66696e6572706978656c732e636f6d00170000ff01000100000a000a0008001d001700180019000b00020100002300000010000e000c02683208687474702f312e31000500050100000000000d0018001604030503060308040805080604010501060102030201`,
- interception: false,
- },
- },
- "Edge": {
- {
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- helloHex: `010000bd030358a3c9bf05f734842e189fb6ce653b67b846e990bc1fc5fb8c397874d06020f1000038c02cc02bc030c02f009f009ec024c023c028c027c00ac009c014c01300390033009d009c003d003c0035002f000a006a00400038003200130100005c000500050100000000000a00080006001d00170018000b00020100000d00140012040105010201040305030203020206010603002300000010000e000c02683208687474702f312e310017000055000006000100020002ff01000100`,
- interception: false,
- },
- },
- "Safari": {
- {
- userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8",
- helloHex: `010000d2030358a295b513c8140c6ff880f4a8a73cc830ed2dab2c4f2068eb365228d828732e00002600ffc02cc02bc024c023c00ac009c030c02fc028c027c014c013009d009c003d003c0035002f010000830000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100000d00120010040102010501060104030203050306033374000000100030002e0268320568322d31360568322d31350568322d313408737064792f332e3106737064792f3308687474702f312e310005000501000000000012000000170000`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.28 (KHTML, like Gecko) Version/11.0 Mobile/15A5318g Safari/604.1",
- helloHex: `010000e10303be294e11847ba01301e0bb6129f4a0d66344602141a8f0a1ab0750a1db145755000028c02cc02bc024c023cca9c00ac009c030c02fc028c027cca8c014c013009d009c003d003c0035002f01000090ff0100010000000014001200000f66696e6572706978656c732e636f6d00170000000d00140012040308040401050308050501080606010201000500050100000000337400000012000000100030002e0268320568322d31360568322d31350568322d313408737064792f332e3106737064792f3308687474702f312e31000b00020100000a00080006001d00170018`,
- interception: false,
- },
- },
- "Tor": {
- {
- userAgent: "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0",
- helloHex: `010000a40303137f05d4151f2d9095aee4254416d9dce73d6a1d857e8097ea20d021c04a7a81000016c02bc02fc00ac009c013c01400330039002f0035000a0100006500000014001200000f66696e6572706978656c732e636f6dff01000100000a00080006001700180019000b00020100337400000010000b000908687474702f312e31000500050100000000000d001600140401050106010201040305030603020304020202`,
- interception: false,
- },
- {
- userAgent: "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0",
- helloHex: `010000b4030322e1f3aff4c37caba303c2ce53ba1689b3e70117a46f413d44f70a74cb6a496100001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a0100006d00000014001200000f66696e6572706978656c732e636f6d00170000ff01000100000a000a0008001d001700180019000b000201000010000b000908687474702f312e31000500050100000000ff030000000d0018001604030503060308040805080604010501060102030201`,
- interception: false,
- },
- },
- "Other": { // these are either non-browser clients or intercepted client hellos
- {
- // openssl s_client (OpenSSL 0.9.8zh 14 Jan 2016) - NOT an interception, but not a browser either
- helloHex: `0100012b03035d385236b8ca7b7946fa0336f164e76bf821ed90e8de26d97cc677671b6f36380000acc030c02cc028c024c014c00a00a500a300a1009f006b006a0069006800390038003700360088008700860085c032c02ec02ac026c00fc005009d003d00350084c02fc02bc027c023c013c00900a400a200a0009e00670040003f003e0033003200310030009a0099009800970045004400430042c031c02dc029c025c00ec004009c003c002f009600410007c011c007c00cc00200050004c012c008001600130010000dc00dc003000a00ff0201000055000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230000000d0020001e060106020603050105020503040104020403030103020303020102020203000f000101`,
- // NOTE: This test case is not actually an interception, but s_client is not a browser
- // or any client we support MITM checking for, either. Since it advertises heartbeat,
- // our heuristics still flag it as a MITM.
- interception: true,
- },
- {
- // curl 7.51.0 (x86_64-apple-darwin16.0) libcurl/7.51.0 SecureTransport zlib/1.2.8
- userAgent: "curl/7.51.0",
- helloHex: `010000a6030358a28c73a71bdfc1f09dee13fecdc58805dcce42ac44254df548f14645f7dc2c00004400ffc02cc02bc024c023c00ac009c008c030c02fc028c027c014c013c012009f009e006b0067003900330016009d009c003d003c0035002f000a00af00ae008d008c008b01000039000a00080006001700180019000b00020100000d00120010040102010501060104030203050306030005000501000000000012000000170000`,
- interception: false,
- },
- {
- // Avast 17.1.2286 (Feb. 2017) on Windows 10 x64 build 14393, intercepting Edge
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- helloHex: `010000ce0303b418fdc4b6cf6436a5e2bfb06b96ed5faa7285c20c7b49341a78be962a9dc40000003ac02cc02bc030c02f009f009ec024c023c028c027c00ac009c014c01300390033009d009c003d003c0035002f000a006a004000380032001300ff0100006b00000014001200000f66696e6572706978656c732e636f6d000b000403000102000a00080006001d0017001800230000000d001400120401050102010403050302030202060106030005000501000000000010000e000c02683208687474702f312e310016000000170000`,
- interception: true,
- },
- {
- // Kaspersky Internet Security 17.0.0.611 on Windows 10 x64 build 14393, intercepting Edge
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- helloHex: `010000eb030361ce302bf4b0d5adf1ff30b2cf433c4a4b68f33e07b2651695e7ae6ec3cf126400003ac02cc02bc030c02f009f009ec024c023c028c027c00ac009c014c01300390033009d009c003d003c0035002f000a006a004000380032001300ff0100008800000014001200000f66696e6572706978656c732e636f6d000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230000000d0020001e060106020603050105020503040104020403030103020303020102020203000500050100000000000f0001010010000e000c02683208687474702f312e31`,
- interception: true,
- },
- {
- // Kaspersky Internet Security 17.0.0.611 on Windows 10 x64 build 14393, intercepting Firefox 51
- userAgent: "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0",
- helloHex: `010001fc0303768e3f9ea75194c7cb03d23e8e6371b95fb696d339b797be57a634309ec98a42200f2a7554098364b7f05d21a8c7f43f31a893a4fc5670051020408c8e4dc234dd001cc02bc02fc02cc030c00ac009c013c01400330039002f0035000a00ff0100019700000014001200000f66696e6572706978656c732e636f6d000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230078bf4e244d4de3d53c6331edda9672dfc4a17aae92b671e86da1368b1b5ae5324372817d8f3b7ffe1a7a1537a5049b86cd7c44863978c1e615b005942755da20fc3a4e34a16f78034aa3b1cffcef95f81a0995c522a53b0e95a4f98db84c43359d93d8647b2de2a69f3ebdcfc6bca452730cbd00179226dedf000d0020001e060106020603050105020503040104020403030103020303020102020203000500050100000000000f0001010010000e000c02683208687474702f312e3100150093000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000`,
- interception: true,
- },
- {
- // Kaspersky Internet Security 17.0.0.611 on Windows 10 x64 build 14393, intercepting Chrome 56
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
- helloHex: `010000c903033481e7af24e647ba5a79ec97e9264c1a1f990cf842f50effe22be52130d5af82000018c02bc02fc02cc030c013c014009c009d002f0035000a00ff0100008800000014001200000f66696e6572706978656c732e636f6d000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230000000d0020001e060106020603050105020503040104020403030103020303020102020203000500050100000000000f0001010010000e000c02683208687474702f312e31`,
- interception: true,
- },
- {
- // AVG 17.1.3006 (build 17.1.3354.20) on Windows 10 x64 build 14393, intercepting Edge
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- helloHex: `010000ca0303fd83091207161eca6b4887db50587109c50e463beb190362736b1fcf9e05f807000036c02cc02bc030c02f009f009ec024c023c028c027c00ac009c014c01300390033009d009c003d003c0035002f006a00400038003200ff0100006b00000014001200000f66696e6572706978656c732e636f6d000b000403000102000a00080006001d0017001800230000000d001400120401050102010403050302030202060106030005000501000000000010000e000c02683208687474702f312e310016000000170000`,
- interception: true,
- },
- {
- // IE 11 on Windows 7, this connection was intercepted by Blue Coat
- // no sensible User-Agent value, since Blue Coat changes it to something super generic
- // By the way, here's another reason we hate Blue Coat: they break TLS 1.3:
- // https://twitter.com/FiloSottile/status/835269932929667072
- helloHex: `010000b1030358a3f3bae627f464da8cb35976b88e9119640032d41e62a107d608ed8d3e62b9000034c028c027c014c013009f009e009d009cc02cc02bc024c023c00ac009003d003c0035002f006a004000380032000a0013000500040100005400000014001200000f66696e6572706978656c732e636f6d000500050100000000000a00080006001700180019000b00020100000d0014001206010603040105010201040305030203020200170000ff01000100`,
- interception: true,
- reqHeaders: http.Header{"X-Bluecoat-Via": {"66808702E9A2CF4"}}, // actual field name would be "X-BlueCoat-Via" but Go canonicalizes field names
- },
- {
- // Firefox 51.0.1 being intercepted by burp 1.7.17
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:51.0) Gecko/20100101 Firefox/51.0",
- helloHex: `010000d8030358a92f4daca95acc2f6a10a9c50d736135eae39406d3090238464540d482677600003ac023c027003cc025c02900670040c009c013002fc004c00e00330032c02bc02f009cc02dc031009e00a2c008c012000ac003c00d0016001300ff01000075000a0034003200170001000300130015000600070009000a0018000b000c0019000d000e000f001000110002001200040005001400080016000b00020100000d00180016060306010503050104030401040202030201020201010000001700150000126a61677561722e6b796877616e612e6f7267`,
- interception: true,
- },
- {
- // Chrome 56 on Windows 10 being intercepted by Fortigate (on some public school network); note: I had to enable TLS 1.0 for this test (proxy was issuing a SHA-1 cert to client)
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
- helloHex: `010000e5030158ac612125c83bae95282113b2a4c572cf613c160d234350fb6d0ddce879ffec000064003300320039003800160013c013c009c014c00ac012c008002f0035000a00150012003d003c00670040006b006ac011c0070096009a009900410084004500440088008700ba00be00bd00c000c400c3c03cc044c042c03dc045c04300090005000400ff01000058000a003600340000000100020003000400050006000700080009000a000b000c000d000e000f0010001100120013001400150016001700180019000b0002010000000014001200000f66696e6572706978656c732e636f6d`,
- interception: true,
- },
- {
- // IE 11 on Windows 10, intercepted by Fortigate (same firewall as above)
- userAgent: "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
- helloHex: `010000e5030158ac634c5278d7b17421f23a64cc91d68c470c6b247322fe867ba035b373d05c000064003300320039003800160013c013c009c014c00ac012c008002f0035000a00150012003d003c00670040006b006ac011c0070096009a009900410084004500440088008700ba00be00bd00c000c400c3c03cc044c042c03dc045c04300090005000400ff01000058000a003600340000000100020003000400050006000700080009000a000b000c000d000e000f0010001100120013001400150016001700180019000b0002010000000014001200000f66696e6572706978656c732e636f6d`,
- interception: true,
- },
- {
- // Edge 38.14393.0.0 on Windows 10, intercepted by Fortigate (same as above)
- userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- helloHex: `010000e5030158ac6421a45794b8ade6a0ac6c910cde0f99c49bb1ba737b88638ec8dcf0d077000064003300320039003800160013c013c009c014c00ac012c008002f0035000a00150012003d003c00670040006b006ac011c0070096009a009900410084004500440088008700ba00be00bd00c000c400c3c03cc044c042c03dc045c04300090005000400ff01000058000a003600340000000100020003000400050006000700080009000a000b000c000d000e000f0010001100120013001400150016001700180019000b0002010000000014001200000f66696e6572706978656c732e636f6d`,
- interception: true,
- },
- {
- // Firefox 50.0.1 on Windows 10, intercepted by Fortigate (same as above)
- userAgent: "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
- helloHex: `010000e5030158ac64e40495e77b7baf2031281451620bfe354b0c37521ebc0a40f5dc0c0cb6000064003300320039003800160013c013c009c014c00ac012c008002f0035000a00150012003d003c00670040006b006ac011c0070096009a009900410084004500440088008700ba00be00bd00c000c400c3c03cc044c042c03dc045c04300090005000400ff01000058000a003600340000000100020003000400050006000700080009000a000b000c000d000e000f0010001100120013001400150016001700180019000b0002010000000014001200000f66696e6572706978656c732e636f6d`,
- interception: true,
- },
- },
- }
-
- for client, chs := range clientHellos {
- for i, ch := range chs {
- hello, err := hex.DecodeString(ch.helloHex)
- if err != nil {
- t.Errorf("[%s] Test %d: Error decoding ClientHello: %v", client, i, err)
- continue
- }
- parsed := parseRawClientHello(hello)
-
- isChrome := parsed.looksLikeChrome()
- isFirefox := parsed.looksLikeFirefox()
- isSafari := parsed.looksLikeSafari()
- isEdge := parsed.looksLikeEdge()
- isTor := parsed.looksLikeTor()
-
- // we want each of the heuristic functions to be as
- // exclusive but as low-maintenance as possible;
- // in other words, if one returns true, the others
- // should return false, with as little logic as possible,
- // but with enough logic to force TLS proxies to do a
- // good job preserving characterstics of the handshake.
- if (isChrome && (isFirefox || isSafari || isEdge || isTor)) ||
- (isFirefox && (isChrome || isSafari || isEdge || isTor)) ||
- (isSafari && (isChrome || isFirefox || isEdge || isTor)) ||
- (isEdge && (isChrome || isFirefox || isSafari || isTor)) ||
- (isTor && (isChrome || isFirefox || isSafari || isEdge)) {
- t.Errorf("[%s] Test %d: Multiple fingerprinting functions matched: "+
- "Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n\tparsed hello hex: %#x\n",
- client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed, parsed)
- }
-
- // test the handler and detection results
- var got, checked bool
- want := ch.interception
- handler := &tlsHandler{
- next: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- got, checked = r.Context().Value(MitmCtxKey).(bool)
- }),
- listener: newTLSListener(nil, nil),
- }
- handler.listener.helloInfos[""] = parsed
- w := httptest.NewRecorder()
- r, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
- r.Header.Set("User-Agent", ch.userAgent)
- if ch.reqHeaders != nil {
- for field, values := range ch.reqHeaders {
- r.Header[field] = values // NOTE: field names not standardized when setting directly like this!
- }
- }
- handler.ServeHTTP(w, r)
- if got != want {
- t.Errorf("[%s] Test %d: Expected MITM=%v but got %v (type assertion OK (checked)=%v)",
- client, i, want, got, checked)
- t.Errorf("[%s] Test %d: Looks like Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n\tparsed hello hex: %#x\n",
- client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed, parsed)
- }
- }
- }
-}
-
-func TestGetVersion(t *testing.T) {
- for i, test := range []struct {
- UserAgent string
- SoftwareName string
- Version float64
- }{
- {
- UserAgent: "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0",
- SoftwareName: "Firefox",
- Version: 45.0,
- },
- {
- UserAgent: "Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0 more_stuff_here",
- SoftwareName: "Firefox",
- Version: 45.0,
- },
- {
- UserAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- SoftwareName: "Safari",
- Version: 537.36,
- },
- {
- UserAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- SoftwareName: "Chrome",
- Version: 51.0270479,
- },
- {
- UserAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- SoftwareName: "Mozilla",
- Version: 5.0,
- },
- {
- UserAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
- SoftwareName: "curl",
- Version: -1,
- },
- } {
- actual := getVersion(test.UserAgent, test.SoftwareName)
- if actual != test.Version {
- t.Errorf("Test [%d]: Expected version=%f, got version=%f for %s in '%s'",
- i, test.Version, actual, test.SoftwareName, test.UserAgent)
- }
- }
-}
diff --git a/caddyhttp/httpserver/path.go b/caddyhttp/httpserver/path.go
deleted file mode 100644
index 92136395d97..00000000000
--- a/caddyhttp/httpserver/path.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package httpserver
-
-import (
- "net/http"
- "strings"
-)
-
-// Path represents a URI path. It should usually be
-// set to the value of a request path.
-type Path string
-
-// Matches checks to see if base matches p. The correct
-// usage of this method sets p as the request path, and
-// base as a Caddyfile (user-defined) rule path.
-//
-// Path matching will probably not always be a direct
-// comparison; this method assures that paths can be
-// easily and consistently matched.
-func (p Path) Matches(base string) bool {
- if base == "/" {
- return true
- }
- if CaseSensitivePath {
- return strings.HasPrefix(string(p), base)
- }
- return strings.HasPrefix(strings.ToLower(string(p)), strings.ToLower(base))
-}
-
-// PathMatcher is a Path RequestMatcher.
-type PathMatcher string
-
-// Match satisfies RequestMatcher.
-func (p PathMatcher) Match(r *http.Request) bool {
- return Path(r.URL.Path).Matches(string(p))
-}
diff --git a/caddyhttp/httpserver/path_test.go b/caddyhttp/httpserver/path_test.go
deleted file mode 100644
index 6ae92e8f1d1..00000000000
--- a/caddyhttp/httpserver/path_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package httpserver
-
-import "testing"
-
-func TestPathMatches(t *testing.T) {
- for i, testcase := range []struct {
- reqPath Path
- rulePath string // or "base path" as in Caddyfile docs
- shouldMatch bool
- caseInsensitive bool
- }{
- {
- reqPath: "/",
- rulePath: "/",
- shouldMatch: true,
- },
- {
- reqPath: "/foo/bar",
- rulePath: "/foo",
- shouldMatch: true,
- },
- {
- reqPath: "/foobar",
- rulePath: "/foo/",
- shouldMatch: false,
- },
- {
- reqPath: "/foobar",
- rulePath: "/foo/bar",
- shouldMatch: false,
- },
- {
- reqPath: "/Foobar",
- rulePath: "/Foo",
- shouldMatch: true,
- },
- {
-
- reqPath: "/FooBar",
- rulePath: "/Foo",
- shouldMatch: true,
- },
- {
- reqPath: "/foobar",
- rulePath: "/FooBar",
- shouldMatch: true,
- caseInsensitive: true,
- },
- {
- reqPath: "",
- rulePath: "/", // a lone forward slash means to match all requests (see issue #1645) - many future test cases related to this issue
- shouldMatch: true,
- },
- {
- reqPath: "foobar.php",
- rulePath: "/",
- shouldMatch: true,
- },
- {
- reqPath: "",
- rulePath: "",
- shouldMatch: true,
- },
- {
- reqPath: "/foo/bar",
- rulePath: "",
- shouldMatch: true,
- },
- {
- reqPath: "/foo/bar",
- rulePath: "",
- shouldMatch: true,
- },
- {
- reqPath: "no/leading/slash",
- rulePath: "/",
- shouldMatch: true,
- },
- {
- reqPath: "no/leading/slash",
- rulePath: "/no/leading/slash",
- shouldMatch: false,
- },
- {
- reqPath: "no/leading/slash",
- rulePath: "",
- shouldMatch: true,
- },
- } {
- CaseSensitivePath = !testcase.caseInsensitive
- if got, want := testcase.reqPath.Matches(testcase.rulePath), testcase.shouldMatch; got != want {
- t.Errorf("Test %d: For request path '%s' and other path '%s': expected %v, got %v",
- i, testcase.reqPath, testcase.rulePath, want, got)
- }
- }
-}
diff --git a/caddyhttp/httpserver/plugin.go b/caddyhttp/httpserver/plugin.go
deleted file mode 100644
index 65ccab737ef..00000000000
--- a/caddyhttp/httpserver/plugin.go
+++ /dev/null
@@ -1,545 +0,0 @@
-package httpserver
-
-import (
- "flag"
- "fmt"
- "log"
- "net"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyfile"
- "github.com/mholt/caddy/caddytls"
-)
-
-const serverType = "http"
-
-func init() {
- flag.StringVar(&HTTPPort, "http-port", HTTPPort, "Default port to use for HTTP")
- flag.StringVar(&HTTPSPort, "https-port", HTTPSPort, "Default port to use for HTTPS")
- flag.StringVar(&Host, "host", DefaultHost, "Default host")
- flag.StringVar(&Port, "port", DefaultPort, "Default port")
- flag.StringVar(&Root, "root", DefaultRoot, "Root path of default site")
- flag.DurationVar(&GracefulTimeout, "grace", 5*time.Second, "Maximum duration of graceful shutdown")
- flag.BoolVar(&HTTP2, "http2", true, "Use HTTP/2")
- flag.BoolVar(&QUIC, "quic", false, "Use experimental QUIC")
-
- caddy.RegisterServerType(serverType, caddy.ServerType{
- Directives: func() []string { return directives },
- DefaultInput: func() caddy.Input {
- if Port == DefaultPort && Host != "" {
- // by leaving the port blank in this case we give auto HTTPS
- // a chance to set the port to 443 for us
- return caddy.CaddyfileInput{
- Contents: []byte(fmt.Sprintf("%s\nroot %s", Host, Root)),
- ServerTypeName: serverType,
- }
- }
- return caddy.CaddyfileInput{
- Contents: []byte(fmt.Sprintf("%s:%s\nroot %s", Host, Port, Root)),
- ServerTypeName: serverType,
- }
- },
- NewContext: newContext,
- })
- caddy.RegisterCaddyfileLoader("short", caddy.LoaderFunc(shortCaddyfileLoader))
- caddy.RegisterParsingCallback(serverType, "root", hideCaddyfile)
- caddy.RegisterParsingCallback(serverType, "tls", activateHTTPS)
- caddytls.RegisterConfigGetter(serverType, func(c *caddy.Controller) *caddytls.Config { return GetConfig(c).TLS })
-}
-
-// hideCaddyfile hides the source/origin Caddyfile if it is within the
-// site root. This function should be run after parsing the root directive.
-func hideCaddyfile(cctx caddy.Context) error {
- ctx := cctx.(*httpContext)
- for _, cfg := range ctx.siteConfigs {
- // if no Caddyfile exists exit.
- if cfg.originCaddyfile == "" {
- return nil
- }
- absRoot, err := filepath.Abs(cfg.Root)
- if err != nil {
- return err
- }
- absOriginCaddyfile, err := filepath.Abs(cfg.originCaddyfile)
- if err != nil {
- return err
- }
- if strings.HasPrefix(absOriginCaddyfile, absRoot) {
- cfg.HiddenFiles = append(cfg.HiddenFiles, filepath.ToSlash(strings.TrimPrefix(absOriginCaddyfile, absRoot)))
- }
- }
- return nil
-}
-
-func newContext() caddy.Context {
- return &httpContext{keysToSiteConfigs: make(map[string]*SiteConfig)}
-}
-
-type httpContext struct {
- // keysToSiteConfigs maps an address at the top of a
- // server block (a "key") to its SiteConfig. Not all
- // SiteConfigs will be represented here, only ones
- // that appeared in the Caddyfile.
- keysToSiteConfigs map[string]*SiteConfig
-
- // siteConfigs is the master list of all site configs.
- siteConfigs []*SiteConfig
-}
-
-func (h *httpContext) saveConfig(key string, cfg *SiteConfig) {
- h.siteConfigs = append(h.siteConfigs, cfg)
- h.keysToSiteConfigs[key] = cfg
-}
-
-// InspectServerBlocks make sure that everything checks out before
-// executing directives and otherwise prepares the directives to
-// be parsed and executed.
-func (h *httpContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
- // For each address in each server block, make a new config
- for _, sb := range serverBlocks {
- for _, key := range sb.Keys {
- key = strings.ToLower(key)
- if _, dup := h.keysToSiteConfigs[key]; dup {
- return serverBlocks, fmt.Errorf("duplicate site address: %s", key)
- }
- addr, err := standardizeAddress(key)
- if err != nil {
- return serverBlocks, err
- }
-
- // Fill in address components from command line so that middleware
- // have access to the correct information during setup
- if addr.Host == "" && Host != DefaultHost {
- addr.Host = Host
- }
- if addr.Port == "" && Port != DefaultPort {
- addr.Port = Port
- }
-
- // If default HTTP or HTTPS ports have been customized,
- // make sure the ACME challenge ports match
- var altHTTPPort, altTLSSNIPort string
- if HTTPPort != DefaultHTTPPort {
- altHTTPPort = HTTPPort
- }
- if HTTPSPort != DefaultHTTPSPort {
- altTLSSNIPort = HTTPSPort
- }
-
- // Save the config to our master list, and key it for lookups
- cfg := &SiteConfig{
- Addr: addr,
- Root: Root,
- TLS: &caddytls.Config{
- Hostname: addr.Host,
- AltHTTPPort: altHTTPPort,
- AltTLSSNIPort: altTLSSNIPort,
- },
- originCaddyfile: sourceFile,
- }
- h.saveConfig(key, cfg)
- }
- }
-
- // For sites that have gzip (which gets chained in
- // before the error handler) we should ensure that the
- // errors directive also appears so error pages aren't
- // written after the gzip writer is closed. See #616.
- for _, sb := range serverBlocks {
- _, hasGzip := sb.Tokens["gzip"]
- _, hasErrors := sb.Tokens["errors"]
- if hasGzip && !hasErrors {
- sb.Tokens["errors"] = []caddyfile.Token{{Text: "errors"}}
- }
- }
-
- return serverBlocks, nil
-}
-
-// MakeServers uses the newly-created siteConfigs to
-// create and return a list of server instances.
-func (h *httpContext) MakeServers() ([]caddy.Server, error) {
- // make sure TLS is disabled for explicitly-HTTP sites
- // (necessary when HTTP address shares a block containing tls)
- for _, cfg := range h.siteConfigs {
- if !cfg.TLS.Enabled {
- continue
- }
- if cfg.Addr.Port == HTTPPort || cfg.Addr.Scheme == "http" {
- cfg.TLS.Enabled = false
- log.Printf("[WARNING] TLS disabled for %s", cfg.Addr)
- } else if cfg.Addr.Scheme == "" {
- // set scheme to https ourselves, since TLS is enabled
- // and it was not explicitly set to something else. this
- // makes it appear as "https" when we print the list of
- // running sites; otherwise "http" would be assumed which
- // is incorrect for this site.
- cfg.Addr.Scheme = "https"
- }
- if cfg.Addr.Port == "" && ((!cfg.TLS.Manual && !cfg.TLS.SelfSigned) || cfg.TLS.OnDemand) {
- // this is vital, otherwise the function call below that
- // sets the listener address will use the default port
- // instead of 443 because it doesn't know about TLS.
- cfg.Addr.Port = HTTPSPort
- }
- }
-
- // we must map (group) each config to a bind address
- groups, err := groupSiteConfigsByListenAddr(h.siteConfigs)
- if err != nil {
- return nil, err
- }
-
- // then we create a server for each group
- var servers []caddy.Server
- for addr, group := range groups {
- s, err := NewServer(addr, group)
- if err != nil {
- return nil, err
- }
- servers = append(servers, s)
- }
-
- return servers, nil
-}
-
-// GetConfig gets the SiteConfig that corresponds to c.
-// If none exist (should only happen in tests), then a
-// new, empty one will be created.
-func GetConfig(c *caddy.Controller) *SiteConfig {
- ctx := c.Context().(*httpContext)
- key := strings.ToLower(c.Key)
- if cfg, ok := ctx.keysToSiteConfigs[key]; ok {
- return cfg
- }
- // we should only get here during tests because directive
- // actions typically skip the server blocks where we make
- // the configs
- cfg := &SiteConfig{Root: Root, TLS: new(caddytls.Config)}
- ctx.saveConfig(key, cfg)
- return cfg
-}
-
-// shortCaddyfileLoader loads a Caddyfile if positional arguments are
-// detected, or, in other words, if un-named arguments are provided to
-// the program. A "short Caddyfile" is one in which each argument
-// is a line of the Caddyfile. The default host and port are prepended
-// according to the Host and Port values.
-func shortCaddyfileLoader(serverType string) (caddy.Input, error) {
- if flag.NArg() > 0 && serverType == "http" {
- confBody := fmt.Sprintf("%s:%s\n%s", Host, Port, strings.Join(flag.Args(), "\n"))
- return caddy.CaddyfileInput{
- Contents: []byte(confBody),
- Filepath: "args",
- ServerTypeName: serverType,
- }, nil
- }
- return nil, nil
-}
-
-// groupSiteConfigsByListenAddr groups site configs by their listen
-// (bind) address, so sites that use the same listener can be served
-// on the same server instance. The return value maps the listen
-// address (what you pass into net.Listen) to the list of site configs.
-// This function does NOT vet the configs to ensure they are compatible.
-func groupSiteConfigsByListenAddr(configs []*SiteConfig) (map[string][]*SiteConfig, error) {
- groups := make(map[string][]*SiteConfig)
-
- for _, conf := range configs {
- // We would add a special case here so that localhost addresses
- // bind to 127.0.0.1 if conf.ListenHost is not already set, which
- // would prevent outsiders from even connecting; but that was problematic:
- // https://caddy.community/t/wildcard-virtual-domains-with-wildcard-roots/221/5?u=matt
-
- if conf.Addr.Port == "" {
- conf.Addr.Port = Port
- }
- addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(conf.ListenHost, conf.Addr.Port))
- if err != nil {
- return nil, err
- }
- addrstr := addr.String()
- groups[addrstr] = append(groups[addrstr], conf)
- }
-
- return groups, nil
-}
-
-// Address represents a site address. It contains
-// the original input value, and the component
-// parts of an address. The component parts may be
-// updated to the correct values as setup proceeds,
-// but the original value should never be changed.
-type Address struct {
- Original, Scheme, Host, Port, Path string
-}
-
-// String returns a human-friendly print of the address.
-func (a Address) String() string {
- if a.Host == "" && a.Port == "" {
- return ""
- }
- scheme := a.Scheme
- if scheme == "" {
- if a.Port == HTTPSPort {
- scheme = "https"
- } else {
- scheme = "http"
- }
- }
- s := scheme
- if s != "" {
- s += "://"
- }
- s += a.Host
- if a.Port != "" &&
- ((scheme == "https" && a.Port != DefaultHTTPSPort) ||
- (scheme == "http" && a.Port != DefaultHTTPPort)) {
- s += ":" + a.Port
- }
- if a.Path != "" {
- s += a.Path
- }
- return s
-}
-
-// VHost returns a sensible concatenation of Host:Port/Path from a.
-// It's basically the a.Original but without the scheme.
-func (a Address) VHost() string {
- if idx := strings.Index(a.Original, "://"); idx > -1 {
- return a.Original[idx+3:]
- }
- return a.Original
-}
-
-// standardizeAddress parses an address string into a structured format with separate
-// scheme, host, port, and path portions, as well as the original input string.
-func standardizeAddress(str string) (Address, error) {
- input := str
-
- // Split input into components (prepend with // to assert host by default)
- if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") {
- str = "//" + str
- }
- u, err := url.Parse(str)
- if err != nil {
- return Address{}, err
- }
-
- // separate host and port
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- host, port, err = net.SplitHostPort(u.Host + ":")
- if err != nil {
- host = u.Host
- }
- }
-
- // see if we can set port based off scheme
- if port == "" {
- if u.Scheme == "http" {
- port = HTTPPort
- } else if u.Scheme == "https" {
- port = HTTPSPort
- }
- }
-
- // repeated or conflicting scheme is confusing, so error
- if u.Scheme != "" && (port == "http" || port == "https") {
- return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input)
- }
-
- // error if scheme and port combination violate convention
- if (u.Scheme == "http" && port == HTTPSPort) || (u.Scheme == "https" && port == HTTPPort) {
- return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
- }
-
- // standardize http and https ports to their respective port numbers
- if port == "http" {
- u.Scheme = "http"
- port = HTTPPort
- } else if port == "https" {
- u.Scheme = "https"
- port = HTTPSPort
- }
-
- return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
-}
-
-// RegisterDevDirective splices name into the list of directives
-// immediately before another directive. This function is ONLY
-// for plugin development purposes! NEVER use it for a plugin
-// that you are not currently building. If before is empty,
-// the directive will be appended to the end of the list.
-//
-// It is imperative that directives execute in the proper
-// order, and hard-coding the list of directives guarantees
-// a correct, absolute order every time. This function is
-// convenient when developing a plugin, but it does not
-// guarantee absolute ordering. Multiple plugins registering
-// directives with this function will lead to non-
-// deterministic builds and buggy software.
-//
-// Directive names must be lower-cased and unique. Any errors
-// here are fatal, and even successful calls print a message
-// to stdout as a reminder to use it only in development.
-func RegisterDevDirective(name, before string) {
- if name == "" {
- fmt.Println("[FATAL] Cannot register empty directive name")
- os.Exit(1)
- }
- if strings.ToLower(name) != name {
- fmt.Printf("[FATAL] %s: directive name must be lowercase\n", name)
- os.Exit(1)
- }
- for _, dir := range directives {
- if dir == name {
- fmt.Printf("[FATAL] %s: directive name already exists\n", name)
- os.Exit(1)
- }
- }
- if before == "" {
- directives = append(directives, name)
- } else {
- var found bool
- for i, dir := range directives {
- if dir == before {
- directives = append(directives[:i], append([]string{name}, directives[i:]...)...)
- found = true
- break
- }
- }
- if !found {
- fmt.Printf("[FATAL] %s: directive not found\n", before)
- os.Exit(1)
- }
- }
- msg := fmt.Sprintf("Registered directive '%s' ", name)
- if before == "" {
- msg += "at end of list"
- } else {
- msg += fmt.Sprintf("before '%s'", before)
- }
- fmt.Printf("[DEV NOTICE] %s\n", msg)
-}
-
-// directives is the list of all directives known to exist for the
-// http server type, including non-standard (3rd-party) directives.
-// The ordering of this list is important.
-var directives = []string{
- // primitive actions that set up the fundamental vitals of each config
- "root",
- "index",
- "bind",
- "limits",
- "timeouts",
- "tls",
-
- // services/utilities, or other directives that don't necessarily inject handlers
- "startup",
- "shutdown",
- "request_id",
- "realip", // github.com/captncraig/caddy-realip
- "git", // github.com/abiosoft/caddy-git
-
- // directives that add listener middleware to the stack
- "proxyprotocol", // github.com/mastercactapus/caddy-proxyprotocol
-
- // directives that add middleware to the stack
- "locale", // github.com/simia-tech/caddy-locale
- "log",
- "cache", // github.com/nicolasazrak/caddy-cache
- "rewrite",
- "ext",
- "gzip",
- "header",
- "errors",
- "authz", // github.com/casbin/caddy-authz
- "filter", // github.com/echocat/caddy-filter
- "minify", // github.com/hacdias/caddy-minify
- "ipfilter", // github.com/pyed/ipfilter
- "ratelimit", // github.com/xuqingfeng/caddy-rate-limit
- "search", // github.com/pedronasser/caddy-search
- "expires", // github.com/epicagency/caddy-expires
- "basicauth",
- "redir",
- "status",
- "cors", // github.com/captncraig/cors/caddy
- "nobots", // github.com/Xumeiquer/nobots
- "mime",
- "login", // github.com/tarent/loginsrv/caddy
- "reauth", // github.com/freman/caddy-reauth
- "jwt", // github.com/BTBurke/caddy-jwt
- "jsonp", // github.com/pschlump/caddy-jsonp
- "upload", // blitznote.com/src/caddy.upload
- "multipass", // github.com/namsral/multipass/caddy
- "internal",
- "pprof",
- "expvar",
- "push",
- "datadog", // github.com/payintech/caddy-datadog
- "prometheus", // github.com/miekg/caddy-prometheus
- "proxy",
- "fastcgi",
- "cgi", // github.com/jung-kurt/caddy-cgi
- "websocket",
- "filemanager", // github.com/hacdias/filemanager/caddy/filemanager
- "webdav", // github.com/hacdias/caddy-webdav
- "markdown",
- "templates",
- "browse",
- "jekyll", // github.com/hacdias/filemanager/caddy/jekyll
- "hugo", // github.com/hacdias/filemanager/caddy/hugo
- "mailout", // github.com/SchumacherFM/mailout
- "awses", // github.com/miquella/caddy-awses
- "awslambda", // github.com/coopernurse/caddy-awslambda
- "grpc", // github.com/pieterlouw/caddy-grpc
- "gopkg", // github.com/zikes/gopkg
- "restic", // github.com/restic/caddy
-}
-
-const (
- // DefaultHost is the default host.
- DefaultHost = ""
- // DefaultPort is the default port.
- DefaultPort = "2015"
- // DefaultRoot is the default root folder.
- DefaultRoot = "."
- // DefaultHTTPPort is the default port for HTTP.
- DefaultHTTPPort = "80"
- // DefaultHTTPSPort is the default port for HTTPS.
- DefaultHTTPSPort = "443"
-)
-
-// These "soft defaults" are configurable by
-// command line flags, etc.
-var (
- // Root is the site root
- Root = DefaultRoot
-
- // Host is the site host
- Host = DefaultHost
-
- // Port is the site port
- Port = DefaultPort
-
- // GracefulTimeout is the maximum duration of a graceful shutdown.
- GracefulTimeout time.Duration
-
- // HTTP2 indicates whether HTTP2 is enabled or not.
- HTTP2 bool
-
- // QUIC indicates whether QUIC is enabled or not.
- QUIC bool
-
- // HTTPPort is the port to use for HTTP.
- HTTPPort = DefaultHTTPPort
-
- // HTTPSPort is the port to use for HTTPS.
- HTTPSPort = DefaultHTTPSPort
-)
diff --git a/caddyhttp/httpserver/plugin_test.go b/caddyhttp/httpserver/plugin_test.go
deleted file mode 100644
index 34781388414..00000000000
--- a/caddyhttp/httpserver/plugin_test.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package httpserver
-
-import (
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyfile"
-)
-
-func TestStandardizeAddress(t *testing.T) {
- for i, test := range []struct {
- input string
- scheme, host, port, path string
- shouldErr bool
- }{
- {`localhost`, "", "localhost", "", "", false},
- {`localhost:1234`, "", "localhost", "1234", "", false},
- {`localhost:`, "", "localhost", "", "", false},
- {`0.0.0.0`, "", "0.0.0.0", "", "", false},
- {`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false},
- {`:1234`, "", "", "1234", "", false},
- {`[::1]`, "", "::1", "", "", false},
- {`[::1]:1234`, "", "::1", "1234", "", false},
- {`:`, "", "", "", "", false},
- {`localhost:http`, "http", "localhost", "80", "", false},
- {`localhost:https`, "https", "localhost", "443", "", false},
- {`:http`, "http", "", "80", "", false},
- {`:https`, "https", "", "443", "", false},
- {`http://localhost:https`, "", "", "", "", true}, // conflict
- {`http://localhost:http`, "", "", "", "", true}, // repeated scheme
- {`http://localhost:443`, "", "", "", "", true}, // not conventional
- {`https://localhost:80`, "", "", "", "", true}, // not conventional
- {`http://localhost`, "http", "localhost", "80", "", false},
- {`https://localhost`, "https", "localhost", "443", "", false},
- {`http://127.0.0.1`, "http", "127.0.0.1", "80", "", false},
- {`https://127.0.0.1`, "https", "127.0.0.1", "443", "", false},
- {`http://[::1]`, "http", "::1", "80", "", false},
- {`http://localhost:1234`, "http", "localhost", "1234", "", false},
- {`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false},
- {`http://[::1]:1234`, "http", "::1", "1234", "", false},
- {``, "", "", "", "", false},
- {`::1`, "", "::1", "", "", true},
- {`localhost::`, "", "localhost::", "", "", true},
- {`#$%@`, "", "", "", "", true},
- {`host/path`, "", "host", "", "/path", false},
- {`http://host/`, "http", "host", "80", "/", false},
- {`//asdf`, "", "asdf", "", "", false},
- {`:1234/asdf`, "", "", "1234", "/asdf", false},
- {`http://host/path`, "http", "host", "80", "/path", false},
- {`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
- {`host:80/path`, "", "host", "80", "/path", false},
- {`host:https/path`, "https", "host", "443", "/path", false},
- {`/path`, "", "", "", "/path", false},
- } {
- actual, err := standardizeAddress(test.input)
-
- if err != nil && !test.shouldErr {
- t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
- }
- if err == nil && test.shouldErr {
- t.Errorf("Test %d (%s): Expected error, but had none", i, test.input)
- }
-
- if !test.shouldErr && actual.Original != test.input {
- t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original)
- }
- if actual.Scheme != test.scheme {
- t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme)
- }
- if actual.Host != test.host {
- t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host)
- }
- if actual.Port != test.port {
- t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port)
- }
- if actual.Path != test.path {
- t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path)
- }
- }
-}
-
-func TestAddressVHost(t *testing.T) {
- for i, test := range []struct {
- addr Address
- expected string
- }{
- {Address{Original: "host:1234"}, "host:1234"},
- {Address{Original: "host:1234/foo"}, "host:1234/foo"},
- {Address{Original: "host/foo"}, "host/foo"},
- {Address{Original: "http://host/foo"}, "host/foo"},
- {Address{Original: "https://host/foo"}, "host/foo"},
- } {
- actual := test.addr.VHost()
- if actual != test.expected {
- t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
- }
- }
-}
-
-func TestAddressString(t *testing.T) {
- for i, test := range []struct {
- addr Address
- expected string
- }{
- {Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"},
- {Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"},
- {Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"},
- {Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"},
- {Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"},
- {Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"},
- {Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"},
- {Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"},
- {Address{Scheme: "", Host: "", Port: "", Path: ""}, ""},
- } {
- actual := test.addr.String()
- if actual != test.expected {
- t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
- }
- }
-}
-
-func TestInspectServerBlocksWithCustomDefaultPort(t *testing.T) {
- Port = "9999"
- filename := "Testfile"
- ctx := newContext().(*httpContext)
- input := strings.NewReader(`localhost`)
- sblocks, err := caddyfile.Parse(filename, input, nil)
- if err != nil {
- t.Fatalf("Expected no error setting up test, got: %v", err)
- }
- _, err = ctx.InspectServerBlocks(filename, sblocks)
- if err != nil {
- t.Fatalf("Didn't expect an error, but got: %v", err)
- }
- addr := ctx.keysToSiteConfigs["localhost"].Addr
- if addr.Port != Port {
- t.Errorf("Expected the port on the address to be set, but got: %#v", addr)
- }
-}
-
-func TestInspectServerBlocksCaseInsensitiveKey(t *testing.T) {
- filename := "Testfile"
- ctx := newContext().(*httpContext)
- input := strings.NewReader("localhost {\n}\nLOCALHOST {\n}")
- sblocks, err := caddyfile.Parse(filename, input, nil)
- if err != nil {
- t.Fatalf("Expected no error setting up test, got: %v", err)
- }
- _, err = ctx.InspectServerBlocks(filename, sblocks)
- if err == nil {
- t.Error("Expected an error because keys on this server type are case-insensitive (so these are duplicated), but didn't get an error")
- }
-}
-
-func TestGetConfig(t *testing.T) {
- // case insensitivity for key
- con := caddy.NewTestController("http", "")
- con.Key = "foo"
- cfg := GetConfig(con)
- con.Key = "FOO"
- cfg2 := GetConfig(con)
- if cfg != cfg2 {
- t.Errorf("Expected same config using same key with different case; got %p and %p", cfg, cfg2)
- }
-
- // make sure different key returns different config
- con.Key = "foobar"
- cfg3 := GetConfig(con)
- if cfg == cfg3 {
- t.Errorf("Expected different configs using when key is different; got %p and %p", cfg, cfg3)
- }
-}
-
-func TestDirectivesList(t *testing.T) {
- for i, dir1 := range directives {
- if dir1 == "" {
- t.Errorf("directives[%d]: empty directive name", i)
- continue
- }
- if got, want := dir1, strings.ToLower(dir1); got != want {
- t.Errorf("directives[%d]: %s should be lower-cased", i, dir1)
- continue
- }
- for j := i + 1; j < len(directives); j++ {
- dir2 := directives[j]
- if dir1 == dir2 {
- t.Errorf("directives[%d] (%s) is a duplicate of directives[%d] (%s)",
- j, dir2, i, dir1)
- }
- }
- }
-}
-
-func TestContextSaveConfig(t *testing.T) {
- ctx := newContext().(*httpContext)
- ctx.saveConfig("foo", new(SiteConfig))
- if _, ok := ctx.keysToSiteConfigs["foo"]; !ok {
- t.Error("Expected config to be saved, but it wasn't")
- }
- if got, want := len(ctx.siteConfigs), 1; got != want {
- t.Errorf("Expected len(siteConfigs) == %d, but was %d", want, got)
- }
- ctx.saveConfig("Foobar", new(SiteConfig))
- if _, ok := ctx.keysToSiteConfigs["foobar"]; ok {
- t.Error("Did not expect to get config with case-insensitive key, but did")
- }
- if got, want := len(ctx.siteConfigs), 2; got != want {
- t.Errorf("Expected len(siteConfigs) == %d, but was %d", want, got)
- }
-}
-
-// Test to make sure we are correctly hiding the Caddyfile
-func TestHideCaddyfile(t *testing.T) {
- ctx := newContext().(*httpContext)
- ctx.saveConfig("test", &SiteConfig{
- Root: Root,
- originCaddyfile: "Testfile",
- })
- err := hideCaddyfile(ctx)
- if err != nil {
- t.Fatalf("Failed to hide Caddyfile, got: %v", err)
- return
- }
- if len(ctx.siteConfigs[0].HiddenFiles) == 0 {
- t.Fatal("Failed to add Caddyfile to HiddenFiles.")
- return
- }
- for _, file := range ctx.siteConfigs[0].HiddenFiles {
- if file == "/Testfile" {
- return
- }
- }
- t.Fatal("Caddyfile missing from HiddenFiles")
-}
diff --git a/caddyhttp/httpserver/recorder.go b/caddyhttp/httpserver/recorder.go
deleted file mode 100644
index da89056cf4f..00000000000
--- a/caddyhttp/httpserver/recorder.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package httpserver
-
-import (
- "net/http"
- "time"
-)
-
-// ResponseRecorder is a type of http.ResponseWriter that captures
-// the status code written to it and also the size of the body
-// written in the response. A status code does not have
-// to be written, however, in which case 200 must be assumed.
-// It is best to have the constructor initialize this type
-// with that default status code.
-//
-// Setting the Replacer field allows middlewares to type-assert
-// the http.ResponseWriter to ResponseRecorder and set their own
-// placeholder values for logging utilities to use.
-//
-// Beware when accessing the Replacer value; it may be nil!
-type ResponseRecorder struct {
- *ResponseWriterWrapper
- Replacer Replacer
- status int
- size int
- start time.Time
-}
-
-// NewResponseRecorder makes and returns a new responseRecorder,
-// which captures the HTTP Status code from the ResponseWriter
-// and also the length of the response body written through it.
-// Because a status is not set unless WriteHeader is called
-// explicitly, this constructor initializes with a status code
-// of 200 to cover the default case.
-func NewResponseRecorder(w http.ResponseWriter) *ResponseRecorder {
- return &ResponseRecorder{
- ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
- status: http.StatusOK,
- start: time.Now(),
- }
-}
-
-// WriteHeader records the status code and calls the
-// underlying ResponseWriter's WriteHeader method.
-func (r *ResponseRecorder) WriteHeader(status int) {
- r.status = status
- r.ResponseWriterWrapper.WriteHeader(status)
-}
-
-// Write is a wrapper that records the size of the body
-// that gets written.
-func (r *ResponseRecorder) Write(buf []byte) (int, error) {
- n, err := r.ResponseWriterWrapper.Write(buf)
- if err == nil {
- r.size += n
- }
- return n, err
-}
-
-// Size is a Getter to size property
-func (r *ResponseRecorder) Size() int {
- return r.size
-}
-
-// Status is a Getter to status property
-func (r *ResponseRecorder) Status() int {
- return r.status
-}
-
-// Interface guards
-var _ HTTPInterfaces = (*ResponseRecorder)(nil)
diff --git a/caddyhttp/httpserver/recorder_test.go b/caddyhttp/httpserver/recorder_test.go
deleted file mode 100644
index 0772d669f4d..00000000000
--- a/caddyhttp/httpserver/recorder_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package httpserver
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func TestNewResponseRecorder(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- if !(recordRequest.ResponseWriter == w) {
- t.Fatalf("Expected Response writer in the Recording to be same as the one sent\n")
- }
- if recordRequest.status != http.StatusOK {
- t.Fatalf("Expected recorded status to be http.StatusOK (%d) , but found %d\n ", http.StatusOK, recordRequest.status)
- }
-}
-func TestWriteHeader(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- recordRequest.WriteHeader(401)
- if w.Code != 401 || recordRequest.status != 401 {
- t.Fatalf("Expected Response status to be set to 401, but found %d\n", recordRequest.status)
- }
-}
-
-func TestWrite(t *testing.T) {
- w := httptest.NewRecorder()
- responseTestString := "test"
- recordRequest := NewResponseRecorder(w)
- buf := []byte(responseTestString)
- recordRequest.Write(buf)
- if recordRequest.size != len(buf) {
- t.Fatalf("Expected the bytes written counter to be %d, but instead found %d\n", len(buf), recordRequest.size)
- }
- if w.Body.String() != responseTestString {
- t.Fatalf("Expected Response Body to be %s , but found %s\n", responseTestString, w.Body.String())
- }
-}
diff --git a/caddyhttp/httpserver/replacer.go b/caddyhttp/httpserver/replacer.go
deleted file mode 100644
index 24757a2756d..00000000000
--- a/caddyhttp/httpserver/replacer.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package httpserver
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "os"
- "path"
- "strconv"
- "strings"
- "time"
-
- "github.com/mholt/caddy"
-)
-
-// requestReplacer is a strings.Replacer which is used to
-// encode literal \r and \n characters and keep everything
-// on one line
-var requestReplacer = strings.NewReplacer(
- "\r", "\\r",
- "\n", "\\n",
-)
-
-var now = time.Now
-
-// Replacer is a type which can replace placeholder
-// substrings in a string with actual values from a
-// http.Request and ResponseRecorder. Always use
-// NewReplacer to get one of these. Any placeholders
-// made with Set() should overwrite existing values if
-// the key is already used.
-type Replacer interface {
- Replace(string) string
- Set(key, value string)
-}
-
-// replacer implements Replacer. customReplacements
-// is used to store custom replacements created with
-// Set() until the time of replacement, at which point
-// they will be used to overwrite other replacements
-// if there is a name conflict.
-type replacer struct {
- customReplacements map[string]string
- emptyValue string
- responseRecorder *ResponseRecorder
- request *http.Request
- requestBody *limitWriter
-}
-
-type limitWriter struct {
- w bytes.Buffer
- remain int
-}
-
-func newLimitWriter(max int) *limitWriter {
- return &limitWriter{
- w: bytes.Buffer{},
- remain: max,
- }
-}
-
-func (lw *limitWriter) Write(p []byte) (int, error) {
- // skip if we are full
- if lw.remain <= 0 {
- return len(p), nil
- }
- if n := len(p); n > lw.remain {
- p = p[:lw.remain]
- }
- n, err := lw.w.Write(p)
- lw.remain -= n
- return n, err
-}
-
-func (lw *limitWriter) String() string {
- return lw.w.String()
-}
-
-// NewReplacer makes a new replacer based on r and rr which
-// are used for request and response placeholders, respectively.
-// Request placeholders are created immediately, whereas
-// response placeholders are not created until Replace()
-// is invoked. rr may be nil if it is not available.
-// emptyValue should be the string that is used in place
-// of empty string (can still be empty string).
-func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer {
- rb := newLimitWriter(MaxLogBodySize)
- if r.Body != nil {
- r.Body = struct {
- io.Reader
- io.Closer
- }{io.TeeReader(r.Body, rb), io.Closer(r.Body)}
- }
- return &replacer{
- request: r,
- requestBody: rb,
- responseRecorder: rr,
- customReplacements: make(map[string]string),
- emptyValue: emptyValue,
- }
-}
-
-func canLogRequest(r *http.Request) bool {
- if r.Method == "POST" || r.Method == "PUT" {
- for _, cType := range r.Header[headerContentType] {
- // the cType could have charset and other info
- if strings.Contains(cType, contentTypeJSON) || strings.Contains(cType, contentTypeXML) {
- return true
- }
- }
- }
- return false
-}
-
-// Replace performs a replacement of values on s and returns
-// the string with the replaced values.
-func (r *replacer) Replace(s string) string {
- // Do not attempt replacements if no placeholder is found.
- if !strings.ContainsAny(s, "{}") {
- return s
- }
-
- result := ""
- for {
- idxStart := strings.Index(s, "{")
- if idxStart == -1 {
- // no placeholder anymore
- break
- }
- idxEnd := strings.Index(s[idxStart:], "}")
- if idxEnd == -1 {
- // unpaired placeholder
- break
- }
- idxEnd += idxStart
-
- // get a replacement
- placeholder := s[idxStart : idxEnd+1]
- replacement := r.getSubstitution(placeholder)
-
- // append prefix + replacement
- result += s[:idxStart] + replacement
-
- // strip out scanned parts
- s = s[idxEnd+1:]
- }
-
- // append unscanned parts
- return result + s
-}
-
-func roundDuration(d time.Duration) time.Duration {
- if d >= time.Millisecond {
- return round(d, time.Millisecond)
- } else if d >= time.Microsecond {
- return round(d, time.Microsecond)
- }
-
- return d
-}
-
-// round rounds d to the nearest r
-func round(d, r time.Duration) time.Duration {
- if r <= 0 {
- return d
- }
- neg := d < 0
- if neg {
- d = -d
- }
- if m := d % r; m+m < r {
- d = d - m
- } else {
- d = d + r - m
- }
- if neg {
- return -d
- }
- return d
-}
-
-// getSubstitution retrieves value from corresponding key
-func (r *replacer) getSubstitution(key string) string {
- // search custom replacements first
- if value, ok := r.customReplacements[key]; ok {
- return value
- }
-
- // search request headers then
- if key[1] == '>' {
- want := key[2 : len(key)-1]
- for key, values := range r.request.Header {
- // Header placeholders (case-insensitive)
- if strings.EqualFold(key, want) {
- return strings.Join(values, ",")
- }
- }
- }
- // next check for cookies
- if key[1] == '~' {
- name := key[2 : len(key)-1]
- if cookie, err := r.request.Cookie(name); err == nil {
- return cookie.Value
- }
- }
- // next check for query argument
- if key[1] == '?' {
- query := r.request.URL.Query()
- name := key[2 : len(key)-1]
- return query.Get(name)
- }
-
- // search default replacements in the end
- switch key {
- case "{method}":
- return r.request.Method
- case "{scheme}":
- if r.request.TLS != nil {
- return "https"
- }
- return "http"
- case "{hostname}":
- name, err := os.Hostname()
- if err != nil {
- return r.emptyValue
- }
- return name
- case "{host}":
- return r.request.Host
- case "{hostonly}":
- host, _, err := net.SplitHostPort(r.request.Host)
- if err != nil {
- return r.request.Host
- }
- return host
- case "{path}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.Path
- case "{path_escaped}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return url.QueryEscape(u.Path)
- case "{request_id}":
- reqid, _ := r.request.Context().Value(RequestIDCtxKey).(string)
- return reqid
- case "{rewrite_path}":
- return r.request.URL.Path
- case "{rewrite_path_escaped}":
- return url.QueryEscape(r.request.URL.Path)
- case "{query}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.RawQuery
- case "{query_escaped}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return url.QueryEscape(u.RawQuery)
- case "{fragment}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.Fragment
- case "{proto}":
- return r.request.Proto
- case "{remote}":
- host, _, err := net.SplitHostPort(r.request.RemoteAddr)
- if err != nil {
- return r.request.RemoteAddr
- }
- return host
- case "{port}":
- _, port, err := net.SplitHostPort(r.request.RemoteAddr)
- if err != nil {
- return r.emptyValue
- }
- return port
- case "{uri}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return u.RequestURI()
- case "{uri_escaped}":
- u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL)
- return url.QueryEscape(u.RequestURI())
- case "{rewrite_uri}":
- return r.request.URL.RequestURI()
- case "{rewrite_uri_escaped}":
- return url.QueryEscape(r.request.URL.RequestURI())
- case "{when}":
- return now().Format(timeFormat)
- case "{when_iso}":
- return now().UTC().Format(timeFormatISOUTC)
- case "{when_unix}":
- return strconv.FormatInt(now().Unix(), 10)
- case "{file}":
- _, file := path.Split(r.request.URL.Path)
- return file
- case "{dir}":
- dir, _ := path.Split(r.request.URL.Path)
- return dir
- case "{request}":
- dump, err := httputil.DumpRequest(r.request, false)
- if err != nil {
- return r.emptyValue
- }
- return requestReplacer.Replace(string(dump))
- case "{request_body}":
- if !canLogRequest(r.request) {
- return r.emptyValue
- }
- _, err := ioutil.ReadAll(r.request.Body)
- if err != nil {
- if err == ErrMaxBytesExceeded {
- return r.emptyValue
- }
- }
- return requestReplacer.Replace(r.requestBody.String())
- case "{mitm}":
- if val, ok := r.request.Context().Value(caddy.CtxKey("mitm")).(bool); ok {
- if val {
- return "likely"
- }
- return "unlikely"
- }
- return "unknown"
- case "{status}":
- if r.responseRecorder == nil {
- return r.emptyValue
- }
- return strconv.Itoa(r.responseRecorder.status)
- case "{size}":
- if r.responseRecorder == nil {
- return r.emptyValue
- }
- return strconv.Itoa(r.responseRecorder.size)
- case "{latency}":
- if r.responseRecorder == nil {
- return r.emptyValue
- }
- return roundDuration(time.Since(r.responseRecorder.start)).String()
- case "{latency_ms}":
- if r.responseRecorder == nil {
- return r.emptyValue
- }
- elapsedDuration := time.Since(r.responseRecorder.start)
- return strconv.FormatInt(convertToMilliseconds(elapsedDuration), 10)
- }
-
- return r.emptyValue
-}
-
-//convertToMilliseconds returns the number of milliseconds in the given duration
-func convertToMilliseconds(d time.Duration) int64 {
- return d.Nanoseconds() / 1e6
-}
-
-// Set sets key to value in the r.customReplacements map.
-func (r *replacer) Set(key, value string) {
- r.customReplacements["{"+key+"}"] = value
-}
-
-const (
- timeFormat = "02/Jan/2006:15:04:05 -0700"
- timeFormatISOUTC = "2006-01-02T15:04:05Z" // ISO 8601 with timezone to be assumed as UTC
- headerContentType = "Content-Type"
- contentTypeJSON = "application/json"
- contentTypeXML = "application/xml"
- // MaxLogBodySize limits the size of logged request's body
- MaxLogBodySize = 100 * 1024
-)
diff --git a/caddyhttp/httpserver/replacer_test.go b/caddyhttp/httpserver/replacer_test.go
deleted file mode 100644
index 4b6a2e44239..00000000000
--- a/caddyhttp/httpserver/replacer_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package httpserver
-
-import (
- "context"
- "net/http"
- "net/http/httptest"
- "os"
- "strings"
- "testing"
- "time"
-)
-
-func TestNewReplacer(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- reader := strings.NewReader(`{"username": "dennis"}`)
-
- request, err := http.NewRequest("POST", "http://localhost", reader)
- if err != nil {
- t.Fatal("Request Formation Failed\n")
- }
- rep := NewReplacer(request, recordRequest, "")
-
- switch v := rep.(type) {
- case *replacer:
- if v.getSubstitution("{host}") != "localhost" {
- t.Error("Expected host to be localhost")
- }
- if v.getSubstitution("{method}") != "POST" {
- t.Error("Expected request method to be POST")
- }
- default:
- t.Fatalf("Expected *replacer underlying Replacer type, got: %#v", rep)
- }
-}
-
-func TestReplace(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- reader := strings.NewReader(`{"username": "dennis"}`)
-
- request, err := http.NewRequest("POST", "http://localhost/?foo=bar", reader)
- if err != nil {
- t.Fatalf("Failed to make request: %v", err)
- }
- ctx := context.WithValue(request.Context(), OriginalURLCtxKey, *request.URL)
- request = request.WithContext(ctx)
-
- request.Header.Set("Custom", "foobarbaz")
- request.Header.Set("ShorterVal", "1")
- repl := NewReplacer(request, recordRequest, "-")
- // add some headers after creating replacer
- request.Header.Set("CustomAdd", "caddy")
- request.Header.Set("Cookie", "foo=bar; taste=delicious")
-
- hostname, err := os.Hostname()
- if err != nil {
- t.Fatalf("Failed to determine hostname: %v", err)
- }
-
- old := now
- now = func() time.Time {
- return time.Date(2006, 1, 2, 15, 4, 5, 02, time.FixedZone("hardcoded", -7))
- }
- defer func() {
- now = old
- }()
- testCases := []struct {
- template string
- expect string
- }{
- {"This hostname is {hostname}", "This hostname is " + hostname},
- {"This host is {host}.", "This host is localhost."},
- {"This request method is {method}.", "This request method is POST."},
- {"The response status is {status}.", "The response status is 200."},
- {"{when}", "02/Jan/2006:15:04:05 +0000"},
- {"{when_iso}", "2006-01-02T15:04:12Z"},
- {"{when_unix}", "1136214252"},
- {"The Custom header is {>Custom}.", "The Custom header is foobarbaz."},
- {"The CustomAdd header is {>CustomAdd}.", "The CustomAdd header is caddy."},
- {"The request is {request}.", "The request is POST /?foo=bar HTTP/1.1\\r\\nHost: localhost\\r\\n" +
- "Cookie: foo=bar; taste=delicious\\r\\nCustom: foobarbaz\\r\\nCustomadd: caddy\\r\\n" +
- "Shorterval: 1\\r\\n\\r\\n."},
- {"The cUsToM header is {>cUsToM}...", "The cUsToM header is foobarbaz..."},
- {"The Non-Existent header is {>Non-Existent}.", "The Non-Existent header is -."},
- {"Bad {host placeholder...", "Bad {host placeholder..."},
- {"Bad {>Custom placeholder", "Bad {>Custom placeholder"},
- {"Bad {>Custom placeholder {>ShorterVal}", "Bad -"},
- {"Bad {}", "Bad -"},
- {"Cookies are {~taste}", "Cookies are delicious"},
- {"Missing cookie is {~missing}", "Missing cookie is -"},
- {"Query string is {query}", "Query string is foo=bar"},
- {"Query string value for foo is {?foo}", "Query string value for foo is bar"},
- {"Missing query string argument is {?missing}", "Missing query string argument is "},
- }
-
- for _, c := range testCases {
- if expected, actual := c.expect, repl.Replace(c.template); expected != actual {
- t.Errorf("for template '%s', expected '%s', got '%s'", c.template, expected, actual)
- }
- }
-
- complexCases := []struct {
- template string
- replacements map[string]string
- expect string
- }{
- {
- "/a{1}/{2}",
- map[string]string{
- "{1}": "12",
- "{2}": "",
- },
- "/a12/"},
- }
-
- for _, c := range complexCases {
- repl := &replacer{
- customReplacements: c.replacements,
- }
- if expected, actual := c.expect, repl.Replace(c.template); expected != actual {
- t.Errorf("for template '%s', expected '%s', got '%s'", c.template, expected, actual)
- }
- }
-}
-
-func TestSet(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- reader := strings.NewReader(`{"username": "dennis"}`)
-
- request, err := http.NewRequest("POST", "http://localhost", reader)
- if err != nil {
- t.Fatalf("Request Formation Failed: %s\n", err.Error())
- }
- repl := NewReplacer(request, recordRequest, "")
-
- repl.Set("host", "getcaddy.com")
- repl.Set("method", "GET")
- repl.Set("status", "201")
- repl.Set("variable", "value")
-
- if repl.Replace("This host is {host}") != "This host is getcaddy.com" {
- t.Error("Expected host replacement failed")
- }
- if repl.Replace("This request method is {method}") != "This request method is GET" {
- t.Error("Expected method replacement failed")
- }
- if repl.Replace("The response status is {status}") != "The response status is 201" {
- t.Error("Expected status replacement failed")
- }
- if repl.Replace("The value of variable is {variable}") != "The value of variable is value" {
- t.Error("Expected variable replacement failed")
- }
-}
-
-// Test function to test that various placeholders hold correct values after a rewrite
-// has been performed. The NewRequest actually contains the rewritten value.
-func TestPathRewrite(t *testing.T) {
- w := httptest.NewRecorder()
- recordRequest := NewResponseRecorder(w)
- reader := strings.NewReader(`{"username": "dennis"}`)
-
- request, err := http.NewRequest("POST", "http://getcaddy.com/index.php?key=value", reader)
- if err != nil {
- t.Fatalf("Request Formation Failed: %s\n", err.Error())
- }
- urlCopy := *request.URL
- urlCopy.Path = "a/custom/path.php"
- ctx := context.WithValue(request.Context(), OriginalURLCtxKey, urlCopy)
- request = request.WithContext(ctx)
-
- repl := NewReplacer(request, recordRequest, "")
-
- if got, want := repl.Replace("This path is '{path}'"), "This path is 'a/custom/path.php'"; got != want {
- t.Errorf("{path} replacement failed; got '%s', want '%s'", got, want)
- }
-
- if got, want := repl.Replace("This path is {rewrite_path}"), "This path is /index.php"; got != want {
- t.Errorf("{rewrite_path} replacement failed; got '%s', want '%s'", got, want)
- }
- if got, want := repl.Replace("This path is '{uri}'"), "This path is 'a/custom/path.php?key=value'"; got != want {
- t.Errorf("{uri} replacement failed; got '%s', want '%s'", got, want)
- }
-
- if got, want := repl.Replace("This path is {rewrite_uri}"), "This path is /index.php?key=value"; got != want {
- t.Errorf("{rewrite_uri} replacement failed; got '%s', want '%s'", got, want)
- }
-
-}
-
-func TestRound(t *testing.T) {
- var tests = map[time.Duration]time.Duration{
- // 599.935µs -> 560µs
- 559935 * time.Nanosecond: 560 * time.Microsecond,
- // 1.55ms -> 2ms
- 1550 * time.Microsecond: 2 * time.Millisecond,
- // 1.5555s -> 1.556s
- 1555500 * time.Microsecond: 1556 * time.Millisecond,
- // 1m2.0035s -> 1m2.004s
- 62003500 * time.Microsecond: 62004 * time.Millisecond,
- }
-
- for dur, expected := range tests {
- rounded := roundDuration(dur)
- if rounded != expected {
- t.Errorf("Expected %v, Got %v", expected, rounded)
- }
- }
-}
-
-func TestMillisecondConverstion(t *testing.T) {
- var testCases = map[time.Duration]int64{
- 2 * time.Second: 2000,
- 9039492 * time.Nanosecond: 9,
- 1000 * time.Microsecond: 1,
- 127 * time.Nanosecond: 0,
- 0 * time.Millisecond: 0,
- 255 * time.Millisecond: 255,
- }
-
- for dur, expected := range testCases {
- numMillisecond := convertToMilliseconds(dur)
- if numMillisecond != expected {
- t.Errorf("Expected %v. Got %v", expected, numMillisecond)
- }
- }
-}
diff --git a/caddyhttp/httpserver/responsewriterwrapper.go b/caddyhttp/httpserver/responsewriterwrapper.go
deleted file mode 100644
index 350d0e6c979..00000000000
--- a/caddyhttp/httpserver/responsewriterwrapper.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package httpserver
-
-import (
- "bufio"
- "net"
- "net/http"
-)
-
-// ResponseWriterWrapper wrappers underlying ResponseWriter
-// and inherits its Hijacker/Pusher/CloseNotifier/Flusher as well.
-type ResponseWriterWrapper struct {
- http.ResponseWriter
-}
-
-// Hijack implements http.Hijacker. It simply wraps the underlying
-// ResponseWriter's Hijack method if there is one, or returns an error.
-func (rww *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- if hj, ok := rww.ResponseWriter.(http.Hijacker); ok {
- return hj.Hijack()
- }
- return nil, nil, NonHijackerError{Underlying: rww.ResponseWriter}
-}
-
-// Flush implements http.Flusher. It simply wraps the underlying
-// ResponseWriter's Flush method if there is one, or panics.
-func (rww *ResponseWriterWrapper) Flush() {
- if f, ok := rww.ResponseWriter.(http.Flusher); ok {
- f.Flush()
- } else {
- panic(NonFlusherError{Underlying: rww.ResponseWriter})
- }
-}
-
-// CloseNotify implements http.CloseNotifier.
-// It just inherits the underlying ResponseWriter's CloseNotify method.
-// It panics if the underlying ResponseWriter is not a CloseNotifier.
-func (rww *ResponseWriterWrapper) CloseNotify() <-chan bool {
- if cn, ok := rww.ResponseWriter.(http.CloseNotifier); ok {
- return cn.CloseNotify()
- }
- panic(NonCloseNotifierError{Underlying: rww.ResponseWriter})
-}
-
-// Push implements http.Pusher.
-// It just inherits the underlying ResponseWriter's Push method.
-// It panics if the underlying ResponseWriter is not a Pusher.
-func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error {
- if pusher, hasPusher := rww.ResponseWriter.(http.Pusher); hasPusher {
- return pusher.Push(target, opts)
- }
-
- return NonPusherError{Underlying: rww.ResponseWriter}
-}
-
-// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support.
-type HTTPInterfaces interface {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- http.CloseNotifier
- http.Hijacker
-}
-
-// Interface guards
-var _ HTTPInterfaces = (*ResponseWriterWrapper)(nil)
diff --git a/caddyhttp/httpserver/roller.go b/caddyhttp/httpserver/roller.go
deleted file mode 100644
index 8f0823bee7d..00000000000
--- a/caddyhttp/httpserver/roller.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package httpserver
-
-import (
- "errors"
- "io"
- "path/filepath"
- "strconv"
-
- "gopkg.in/natefinch/lumberjack.v2"
-)
-
-// LogRoller implements a type that provides a rolling logger.
-type LogRoller struct {
- Filename string
- MaxSize int
- MaxAge int
- MaxBackups int
- Compress bool
- LocalTime bool
-}
-
-// GetLogWriter returns an io.Writer that writes to a rolling logger.
-// This should be called only from the main goroutine (like during
-// server setup) because this method is not thread-safe; it is careful
-// to create only one log writer per log file, even if the log file
-// is shared by different sites or middlewares. This ensures that
-// rolling is synchronized, since a process (or multiple processes)
-// should not create more than one roller on the same file at the
-// same time. See issue #1363.
-func (l LogRoller) GetLogWriter() io.Writer {
- absPath, err := filepath.Abs(l.Filename)
- if err != nil {
- absPath = l.Filename // oh well, hopefully they're consistent in how they specify the filename
- }
- lj, has := lumberjacks[absPath]
- if !has {
- lj = &lumberjack.Logger{
- Filename: l.Filename,
- MaxSize: l.MaxSize,
- MaxAge: l.MaxAge,
- MaxBackups: l.MaxBackups,
- Compress: l.Compress,
- LocalTime: l.LocalTime,
- }
- lumberjacks[absPath] = lj
- }
- return lj
-}
-
-// IsLogRollerSubdirective is true if the subdirective is for the log roller.
-func IsLogRollerSubdirective(subdir string) bool {
- return subdir == directiveRotateSize ||
- subdir == directiveRotateAge ||
- subdir == directiveRotateKeep ||
- subdir == directiveRotateCompress
-}
-
-var invalidRollerParameterErr = errors.New("invalid roller parameter")
-
-// ParseRoller parses roller contents out of c.
-func ParseRoller(l *LogRoller, what string, where ...string) error {
- if l == nil {
- l = DefaultLogRoller()
- }
-
- // rotate_compress doesn't accept any parameters.
- // others only accept one parameter
- if (what == directiveRotateCompress && len(where) != 0) ||
- (what != directiveRotateCompress && len(where) != 1) {
- return invalidRollerParameterErr
- }
-
- var (
- value int
- err error
- )
- if what != directiveRotateCompress {
- value, err = strconv.Atoi(where[0])
- if err != nil {
- return err
- }
- }
-
- switch what {
- case directiveRotateSize:
- l.MaxSize = value
- case directiveRotateAge:
- l.MaxAge = value
- case directiveRotateKeep:
- l.MaxBackups = value
- case directiveRotateCompress:
- l.Compress = true
- }
- return nil
-}
-
-// DefaultLogRoller will roll logs by default.
-func DefaultLogRoller() *LogRoller {
- return &LogRoller{
- MaxSize: defaultRotateSize,
- MaxAge: defaultRotateAge,
- MaxBackups: defaultRotateKeep,
- Compress: false,
- LocalTime: true,
- }
-}
-
-const (
- // defaultRotateSize is 100 MB.
- defaultRotateSize = 100
- // defaultRotateAge is 14 days.
- defaultRotateAge = 14
- // defaultRotateKeep is 10 files.
- defaultRotateKeep = 10
-
- directiveRotateSize = "rotate_size"
- directiveRotateAge = "rotate_age"
- directiveRotateKeep = "rotate_keep"
- directiveRotateCompress = "rotate_compress"
-)
-
-// lumberjacks maps log filenames to the logger
-// that is being used to keep them rolled/maintained.
-var lumberjacks = make(map[string]*lumberjack.Logger)
diff --git a/caddyhttp/httpserver/server.go b/caddyhttp/httpserver/server.go
deleted file mode 100644
index 6a51aa9c088..00000000000
--- a/caddyhttp/httpserver/server.go
+++ /dev/null
@@ -1,541 +0,0 @@
-// Package httpserver implements an HTTP server on top of Caddy.
-package httpserver
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/lucas-clemente/quic-go/h2quic"
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/staticfiles"
- "github.com/mholt/caddy/caddytls"
-)
-
-// Server is the HTTP server implementation.
-type Server struct {
- Server *http.Server
- quicServer *h2quic.Server
- listener net.Listener
- listenerMu sync.Mutex
- sites []*SiteConfig
- connTimeout time.Duration // max time to wait for a connection before force stop
- tlsGovChan chan struct{} // close to stop the TLS maintenance goroutine
- vhosts *vhostTrie
-}
-
-// ensure it satisfies the interface
-var _ caddy.GracefulServer = new(Server)
-
-var defaultALPN = []string{"h2", "http/1.1"}
-
-// makeTLSConfig extracts TLS settings from each site config to
-// build a tls.Config usable in Caddy HTTP servers. The returned
-// config will be nil if TLS is disabled for these sites.
-func makeTLSConfig(group []*SiteConfig) (*tls.Config, error) {
- var tlsConfigs []*caddytls.Config
- for i := range group {
- if HTTP2 && len(group[i].TLS.ALPN) == 0 {
- // if no application-level protocol was configured up to now,
- // default to HTTP/2, then HTTP/1.1 if necessary
- group[i].TLS.ALPN = defaultALPN
- }
- tlsConfigs = append(tlsConfigs, group[i].TLS)
- }
- return caddytls.MakeTLSConfig(tlsConfigs)
-}
-
-func getFallbacks(sites []*SiteConfig) []string {
- fallbacks := []string{}
- for _, sc := range sites {
- if sc.FallbackSite {
- fallbacks = append(fallbacks, sc.Addr.Host)
- }
- }
- return fallbacks
-}
-
-// NewServer creates a new Server instance that will listen on addr
-// and will serve the sites configured in group.
-func NewServer(addr string, group []*SiteConfig) (*Server, error) {
- s := &Server{
- Server: makeHTTPServerWithTimeouts(addr, group),
- vhosts: newVHostTrie(),
- sites: group,
- connTimeout: GracefulTimeout,
- }
- s.vhosts.fallbackHosts = append(s.vhosts.fallbackHosts, getFallbacks(group)...)
- s.Server = makeHTTPServerWithHeaderLimit(s.Server, group)
- s.Server.Handler = s // this is weird, but whatever
-
- // extract TLS settings from each site config to build
- // a tls.Config, which will not be nil if TLS is enabled
- tlsConfig, err := makeTLSConfig(group)
- if err != nil {
- return nil, err
- }
- s.Server.TLSConfig = tlsConfig
-
- // if TLS is enabled, make sure we prepare the Server accordingly
- if s.Server.TLSConfig != nil {
- // enable QUIC if desired (requires HTTP/2)
- if HTTP2 && QUIC {
- s.quicServer = &h2quic.Server{Server: s.Server}
- s.Server.Handler = s.wrapWithSvcHeaders(s.Server.Handler)
- }
-
- // wrap the HTTP handler with a handler that does MITM detection
- tlsh := &tlsHandler{next: s.Server.Handler}
- s.Server.Handler = tlsh // this needs to be the "outer" handler when Serve() is called, for type assertion
-
- // when Serve() creates the TLS listener later, that listener should
- // be adding a reference the ClientHello info to a map; this callback
- // will be sure to clear out that entry when the connection closes.
- s.Server.ConnState = func(c net.Conn, cs http.ConnState) {
- // when a connection closes or is hijacked, delete its entry
- // in the map, because we are done with it.
- if tlsh.listener != nil {
- if cs == http.StateHijacked || cs == http.StateClosed {
- tlsh.listener.helloInfosMu.Lock()
- delete(tlsh.listener.helloInfos, c.RemoteAddr().String())
- tlsh.listener.helloInfosMu.Unlock()
- }
- }
- }
-
- // As of Go 1.7, if the Server's TLSConfig is not nil, HTTP/2 is enabled only
- // if TLSConfig.NextProtos includes the string "h2"
- if HTTP2 && len(s.Server.TLSConfig.NextProtos) == 0 {
- // some experimenting shows that this NextProtos must have at least
- // one value that overlaps with the NextProtos of any other tls.Config
- // that is returned from GetConfigForClient; if there is no overlap,
- // the connection will fail (as of Go 1.8, Feb. 2017).
- s.Server.TLSConfig.NextProtos = defaultALPN
- }
- }
-
- // Compile custom middleware for every site (enables virtual hosting)
- for _, site := range group {
- stack := Handler(staticfiles.FileServer{Root: http.Dir(site.Root), Hide: site.HiddenFiles})
- for i := len(site.middleware) - 1; i >= 0; i-- {
- stack = site.middleware[i](stack)
- }
- site.middlewareChain = stack
- s.vhosts.Insert(site.Addr.VHost(), site)
- }
-
- return s, nil
-}
-
-// makeHTTPServerWithHeaderLimit apply minimum header limit within a group to given http.Server
-func makeHTTPServerWithHeaderLimit(s *http.Server, group []*SiteConfig) *http.Server {
- var min int64
- for _, cfg := range group {
- limit := cfg.Limits.MaxRequestHeaderSize
- if limit == 0 {
- continue
- }
-
- // not set yet
- if min == 0 {
- min = limit
- }
-
- // find a better one
- if limit < min {
- min = limit
- }
- }
-
- if min > 0 {
- s.MaxHeaderBytes = int(min)
- }
- return s
-}
-
-// makeHTTPServerWithTimeouts makes an http.Server from the group of
-// configs in a way that configures timeouts (or, if not set, it uses
-// the default timeouts) by combining the configuration of each
-// SiteConfig in the group. (Timeouts are important for mitigating
-// slowloris attacks.)
-func makeHTTPServerWithTimeouts(addr string, group []*SiteConfig) *http.Server {
- // find the minimum duration configured for each timeout
- var min Timeouts
- for _, cfg := range group {
- if cfg.Timeouts.ReadTimeoutSet &&
- (!min.ReadTimeoutSet || cfg.Timeouts.ReadTimeout < min.ReadTimeout) {
- min.ReadTimeoutSet = true
- min.ReadTimeout = cfg.Timeouts.ReadTimeout
- }
- if cfg.Timeouts.ReadHeaderTimeoutSet &&
- (!min.ReadHeaderTimeoutSet || cfg.Timeouts.ReadHeaderTimeout < min.ReadHeaderTimeout) {
- min.ReadHeaderTimeoutSet = true
- min.ReadHeaderTimeout = cfg.Timeouts.ReadHeaderTimeout
- }
- if cfg.Timeouts.WriteTimeoutSet &&
- (!min.WriteTimeoutSet || cfg.Timeouts.WriteTimeout < min.WriteTimeout) {
- min.WriteTimeoutSet = true
- min.WriteTimeout = cfg.Timeouts.WriteTimeout
- }
- if cfg.Timeouts.IdleTimeoutSet &&
- (!min.IdleTimeoutSet || cfg.Timeouts.IdleTimeout < min.IdleTimeout) {
- min.IdleTimeoutSet = true
- min.IdleTimeout = cfg.Timeouts.IdleTimeout
- }
- }
-
- // for the values that were not set, use defaults
- if !min.ReadTimeoutSet {
- min.ReadTimeout = defaultTimeouts.ReadTimeout
- }
- if !min.ReadHeaderTimeoutSet {
- min.ReadHeaderTimeout = defaultTimeouts.ReadHeaderTimeout
- }
- if !min.WriteTimeoutSet {
- min.WriteTimeout = defaultTimeouts.WriteTimeout
- }
- if !min.IdleTimeoutSet {
- min.IdleTimeout = defaultTimeouts.IdleTimeout
- }
-
- // set the final values on the server and return it
- return &http.Server{
- Addr: addr,
- ReadTimeout: min.ReadTimeout,
- ReadHeaderTimeout: min.ReadHeaderTimeout,
- WriteTimeout: min.WriteTimeout,
- IdleTimeout: min.IdleTimeout,
- }
-}
-
-func (s *Server) wrapWithSvcHeaders(previousHandler http.Handler) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- s.quicServer.SetQuicHeaders(w.Header())
- previousHandler.ServeHTTP(w, r)
- }
-}
-
-// Listen creates an active listener for s that can be
-// used to serve requests.
-func (s *Server) Listen() (net.Listener, error) {
- if s.Server == nil {
- return nil, fmt.Errorf("Server field is nil")
- }
-
- ln, err := net.Listen("tcp", s.Server.Addr)
- if err != nil {
- var succeeded bool
- if runtime.GOOS == "windows" {
- // Windows has been known to keep sockets open even after closing the listeners.
- // Tests reveal this error case easily because they call Start() then Stop()
- // in succession. TODO: Better way to handle this? And why limit this to Windows?
- for i := 0; i < 20; i++ {
- time.Sleep(100 * time.Millisecond)
- ln, err = net.Listen("tcp", s.Server.Addr)
- if err == nil {
- succeeded = true
- break
- }
- }
- }
- if !succeeded {
- return nil, err
- }
- }
-
- if tcpLn, ok := ln.(*net.TCPListener); ok {
- ln = tcpKeepAliveListener{TCPListener: tcpLn}
- }
-
- cln := ln.(caddy.Listener)
- for _, site := range s.sites {
- for _, m := range site.listenerMiddleware {
- cln = m(cln)
- }
- }
-
- // Very important to return a concrete caddy.Listener
- // implementation for graceful restarts.
- return cln.(caddy.Listener), nil
-}
-
-// ListenPacket creates udp connection for QUIC if it is enabled,
-func (s *Server) ListenPacket() (net.PacketConn, error) {
- if QUIC {
- udpAddr, err := net.ResolveUDPAddr("udp", s.Server.Addr)
- if err != nil {
- return nil, err
- }
- return net.ListenUDP("udp", udpAddr)
- }
- return nil, nil
-}
-
-// Serve serves requests on ln. It blocks until ln is closed.
-func (s *Server) Serve(ln net.Listener) error {
- s.listenerMu.Lock()
- s.listener = ln
- s.listenerMu.Unlock()
-
- if s.Server.TLSConfig != nil {
- // Create TLS listener - note that we do not replace s.listener
- // with this TLS listener; tls.listener is unexported and does
- // not implement the File() method we need for graceful restarts
- // on POSIX systems.
- // TODO: Is this ^ still relevant anymore? Maybe we can now that it's a net.Listener...
- ln = newTLSListener(ln, s.Server.TLSConfig)
- if handler, ok := s.Server.Handler.(*tlsHandler); ok {
- handler.listener = ln.(*tlsHelloListener)
- }
-
- // Rotate TLS session ticket keys
- s.tlsGovChan = caddytls.RotateSessionTicketKeys(s.Server.TLSConfig)
- }
-
- err := s.Server.Serve(ln)
- if s.quicServer != nil {
- s.quicServer.Close()
- }
- return err
-}
-
-// ServePacket serves QUIC requests on pc until it is closed.
-func (s *Server) ServePacket(pc net.PacketConn) error {
- if s.quicServer != nil {
- err := s.quicServer.Serve(pc.(*net.UDPConn))
- return fmt.Errorf("serving QUIC connections: %v", err)
- }
- return nil
-}
-
-// ServeHTTP is the entry point of all HTTP requests.
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- defer func() {
- // We absolutely need to be sure we stay alive up here,
- // even though, in theory, the errors middleware does this.
- if rec := recover(); rec != nil {
- log.Printf("[PANIC] %v", rec)
- DefaultErrorFunc(w, r, http.StatusInternalServerError)
- }
- }()
-
- // copy the original, unchanged URL into the context
- // so it can be referenced by middlewares
- urlCopy := *r.URL
- if r.URL.User != nil {
- userInfo := new(url.Userinfo)
- *userInfo = *r.URL.User
- urlCopy.User = userInfo
- }
- c := context.WithValue(r.Context(), OriginalURLCtxKey, urlCopy)
- r = r.WithContext(c)
-
- w.Header().Set("Server", caddy.AppName)
-
- status, _ := s.serveHTTP(w, r)
-
- // Fallback error response in case error handling wasn't chained in
- if status >= 400 {
- DefaultErrorFunc(w, r, status)
- }
-}
-
-func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- // strip out the port because it's not used in virtual
- // hosting; the port is irrelevant because each listener
- // is on a different port.
- hostname, _, err := net.SplitHostPort(r.Host)
- if err != nil {
- hostname = r.Host
- }
-
- // look up the virtualhost; if no match, serve error
- vhost, pathPrefix := s.vhosts.Match(hostname + r.URL.Path)
- c := context.WithValue(r.Context(), caddy.CtxKey("path_prefix"), pathPrefix)
- r = r.WithContext(c)
-
- if vhost == nil {
- // check for ACME challenge even if vhost is nil;
- // could be a new host coming online soon
- if caddytls.HTTPChallengeHandler(w, r, "localhost", caddytls.DefaultHTTPAlternatePort) {
- return 0, nil
- }
- // otherwise, log the error and write a message to the client
- remoteHost, _, err := net.SplitHostPort(r.RemoteAddr)
- if err != nil {
- remoteHost = r.RemoteAddr
- }
- WriteSiteNotFound(w, r) // don't add headers outside of this function
- log.Printf("[INFO] %s - No such site at %s (Remote: %s, Referer: %s)",
- hostname, s.Server.Addr, remoteHost, r.Header.Get("Referer"))
- return 0, nil
- }
-
- // we still check for ACME challenge if the vhost exists,
- // because we must apply its HTTP challenge config settings
- if s.proxyHTTPChallenge(vhost, w, r) {
- return 0, nil
- }
-
- // trim the path portion of the site address from the beginning of
- // the URL path, so a request to example.com/foo/blog on the site
- // defined as example.com/foo appears as /blog instead of /foo/blog.
- if pathPrefix != "/" {
- r.URL.Path = strings.TrimPrefix(r.URL.Path, pathPrefix)
- if !strings.HasPrefix(r.URL.Path, "/") {
- r.URL.Path = "/" + r.URL.Path
- }
- }
-
- return vhost.middlewareChain.ServeHTTP(w, r)
-}
-
-// proxyHTTPChallenge solves the ACME HTTP challenge if r is the HTTP
-// request for the challenge. If it is, and if the request has been
-// fulfilled (response written), true is returned; false otherwise.
-// If you don't have a vhost, just call the challenge handler directly.
-func (s *Server) proxyHTTPChallenge(vhost *SiteConfig, w http.ResponseWriter, r *http.Request) bool {
- if vhost.Addr.Port != caddytls.HTTPChallengePort {
- return false
- }
- if vhost.TLS != nil && vhost.TLS.Manual {
- return false
- }
- altPort := caddytls.DefaultHTTPAlternatePort
- if vhost.TLS != nil && vhost.TLS.AltHTTPPort != "" {
- altPort = vhost.TLS.AltHTTPPort
- }
- return caddytls.HTTPChallengeHandler(w, r, vhost.ListenHost, altPort)
-}
-
-// Address returns the address s was assigned to listen on.
-func (s *Server) Address() string {
- return s.Server.Addr
-}
-
-// Stop stops s gracefully (or forcefully after timeout) and
-// closes its listener.
-func (s *Server) Stop() error {
- ctx, cancel := context.WithTimeout(context.Background(), s.connTimeout)
- defer cancel()
-
- err := s.Server.Shutdown(ctx)
- if err != nil {
- return err
- }
-
- // signal any TLS governor goroutines to exit
- if s.tlsGovChan != nil {
- close(s.tlsGovChan)
- }
-
- return nil
-}
-
-// OnStartupComplete lists the sites served by this server
-// and any relevant information, assuming caddy.Quiet == false.
-func (s *Server) OnStartupComplete() {
- if caddy.Quiet {
- return
- }
- for _, site := range s.sites {
- output := site.Addr.String()
- if caddy.IsLoopback(s.Address()) && !caddy.IsLoopback(site.Addr.Host) {
- output += " (only accessible on this machine)"
- }
- fmt.Println(output)
- log.Println(output)
- }
-}
-
-// defaultTimeouts stores the default timeout values to use
-// if left unset by user configuration. NOTE: Most default
-// timeouts are disabled (see issues #1464 and #1733).
-var defaultTimeouts = Timeouts{IdleTimeout: 5 * time.Minute}
-
-// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
-// connections. It's used by ListenAndServe and ListenAndServeTLS so
-// dead TCP connections (e.g. closing laptop mid-download) eventually
-// go away.
-//
-// Borrowed from the Go standard library.
-type tcpKeepAliveListener struct {
- *net.TCPListener
-}
-
-// Accept accepts the connection with a keep-alive enabled.
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
-
-// File implements caddy.Listener; it returns the underlying file of the listener.
-func (ln tcpKeepAliveListener) File() (*os.File, error) {
- return ln.TCPListener.File()
-}
-
-// ErrMaxBytesExceeded is the error returned by MaxBytesReader
-// when the request body exceeds the limit imposed
-var ErrMaxBytesExceeded = errors.New("http: request body too large")
-
-// DefaultErrorFunc responds to an HTTP request with a simple description
-// of the specified HTTP status code.
-func DefaultErrorFunc(w http.ResponseWriter, r *http.Request, status int) {
- WriteTextResponse(w, status, fmt.Sprintf("%d %s\n", status, http.StatusText(status)))
-}
-
-const httpStatusMisdirectedRequest = 421 // RFC 7540, 9.1.2
-
-// WriteSiteNotFound writes appropriate error code to w, signaling that
-// requested host is not served by Caddy on a given port.
-func WriteSiteNotFound(w http.ResponseWriter, r *http.Request) {
- status := http.StatusNotFound
- if r.ProtoMajor >= 2 {
- // TODO: use http.StatusMisdirectedRequest when it gets defined
- status = httpStatusMisdirectedRequest
- }
- WriteTextResponse(w, status, fmt.Sprintf("%d Site %s is not served on this interface\n", status, r.Host))
-}
-
-// WriteTextResponse writes body with code status to w. The body will
-// be interpreted as plain text.
-func WriteTextResponse(w http.ResponseWriter, status int, body string) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- w.Header().Set("X-Content-Type-Options", "nosniff")
- w.WriteHeader(status)
- w.Write([]byte(body))
-}
-
-// SafePath joins siteRoot and reqPath and converts it to a path that can
-// be used to access a path on the local disk. It ensures the path does
-// not traverse outside of the site root.
-//
-// If opening a file, use http.Dir instead.
-func SafePath(siteRoot, reqPath string) string {
- reqPath = filepath.ToSlash(reqPath)
- reqPath = strings.Replace(reqPath, "\x00", "", -1) // NOTE: Go 1.9 checks for null bytes in the syscall package
- if siteRoot == "" {
- siteRoot = "."
- }
- return filepath.Join(siteRoot, filepath.FromSlash(path.Clean("/"+reqPath)))
-}
-
-// OriginalURLCtxKey is the key for accessing the original, incoming URL on an HTTP request.
-const OriginalURLCtxKey = caddy.CtxKey("original_url")
diff --git a/caddyhttp/httpserver/server_test.go b/caddyhttp/httpserver/server_test.go
deleted file mode 100644
index 036ee3dd81a..00000000000
--- a/caddyhttp/httpserver/server_test.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package httpserver
-
-import (
- "net/http"
- "testing"
- "time"
-)
-
-func TestAddress(t *testing.T) {
- addr := "127.0.0.1:9005"
- srv := &Server{Server: &http.Server{Addr: addr}}
-
- if got, want := srv.Address(), addr; got != want {
- t.Errorf("Expected '%s' but got '%s'", want, got)
- }
-}
-
-func TestMakeHTTPServerWithTimeouts(t *testing.T) {
- for i, tc := range []struct {
- group []*SiteConfig
- expected Timeouts
- }{
- {
- group: []*SiteConfig{{Timeouts: Timeouts{}}},
- expected: Timeouts{
- ReadTimeout: defaultTimeouts.ReadTimeout,
- ReadHeaderTimeout: defaultTimeouts.ReadHeaderTimeout,
- WriteTimeout: defaultTimeouts.WriteTimeout,
- IdleTimeout: defaultTimeouts.IdleTimeout,
- },
- },
- {
- group: []*SiteConfig{{Timeouts: Timeouts{
- ReadTimeout: 1 * time.Second,
- ReadTimeoutSet: true,
- ReadHeaderTimeout: 2 * time.Second,
- ReadHeaderTimeoutSet: true,
- }}},
- expected: Timeouts{
- ReadTimeout: 1 * time.Second,
- ReadHeaderTimeout: 2 * time.Second,
- WriteTimeout: defaultTimeouts.WriteTimeout,
- IdleTimeout: defaultTimeouts.IdleTimeout,
- },
- },
- {
- group: []*SiteConfig{{Timeouts: Timeouts{
- ReadTimeoutSet: true,
- WriteTimeoutSet: true,
- }}},
- expected: Timeouts{
- ReadTimeout: 0,
- ReadHeaderTimeout: defaultTimeouts.ReadHeaderTimeout,
- WriteTimeout: 0,
- IdleTimeout: defaultTimeouts.IdleTimeout,
- },
- },
- {
- group: []*SiteConfig{
- {Timeouts: Timeouts{
- ReadTimeout: 2 * time.Second,
- ReadTimeoutSet: true,
- WriteTimeout: 2 * time.Second,
- WriteTimeoutSet: true,
- }},
- {Timeouts: Timeouts{
- ReadTimeout: 1 * time.Second,
- ReadTimeoutSet: true,
- WriteTimeout: 1 * time.Second,
- WriteTimeoutSet: true,
- }},
- },
- expected: Timeouts{
- ReadTimeout: 1 * time.Second,
- ReadHeaderTimeout: defaultTimeouts.ReadHeaderTimeout,
- WriteTimeout: 1 * time.Second,
- IdleTimeout: defaultTimeouts.IdleTimeout,
- },
- },
- {
- group: []*SiteConfig{{Timeouts: Timeouts{
- ReadHeaderTimeout: 5 * time.Second,
- ReadHeaderTimeoutSet: true,
- IdleTimeout: 10 * time.Second,
- IdleTimeoutSet: true,
- }}},
- expected: Timeouts{
- ReadTimeout: defaultTimeouts.ReadTimeout,
- ReadHeaderTimeout: 5 * time.Second,
- WriteTimeout: defaultTimeouts.WriteTimeout,
- IdleTimeout: 10 * time.Second,
- },
- },
- } {
- actual := makeHTTPServerWithTimeouts("127.0.0.1:9005", tc.group)
-
- if got, want := actual.Addr, "127.0.0.1:9005"; got != want {
- t.Errorf("Test %d: Expected Addr=%s, but was %s", i, want, got)
- }
- if got, want := actual.ReadTimeout, tc.expected.ReadTimeout; got != want {
- t.Errorf("Test %d: Expected ReadTimeout=%v, but was %v", i, want, got)
- }
- if got, want := actual.ReadHeaderTimeout, tc.expected.ReadHeaderTimeout; got != want {
- t.Errorf("Test %d: Expected ReadHeaderTimeout=%v, but was %v", i, want, got)
- }
- if got, want := actual.WriteTimeout, tc.expected.WriteTimeout; got != want {
- t.Errorf("Test %d: Expected WriteTimeout=%v, but was %v", i, want, got)
- }
- if got, want := actual.IdleTimeout, tc.expected.IdleTimeout; got != want {
- t.Errorf("Test %d: Expected IdleTimeout=%v, but was %v", i, want, got)
- }
- }
-}
-
-func TestMakeHTTPServerWithHeaderLimit(t *testing.T) {
- for name, c := range map[string]struct {
- group []*SiteConfig
- expect int
- }{
- "disable": {
- group: []*SiteConfig{{}},
- expect: 0,
- },
- "oneSite": {
- group: []*SiteConfig{{Limits: Limits{
- MaxRequestHeaderSize: 100,
- }}},
- expect: 100,
- },
- "multiSites": {
- group: []*SiteConfig{
- {Limits: Limits{MaxRequestHeaderSize: 100}},
- {Limits: Limits{MaxRequestHeaderSize: 50}},
- },
- expect: 50,
- },
- } {
- c := c
- t.Run(name, func(t *testing.T) {
- actual := makeHTTPServerWithHeaderLimit(&http.Server{}, c.group)
- if got := actual.MaxHeaderBytes; got != c.expect {
- t.Errorf("Expect %d, but got %d", c.expect, got)
- }
- })
- }
-}
diff --git a/caddyhttp/httpserver/siteconfig.go b/caddyhttp/httpserver/siteconfig.go
deleted file mode 100644
index 2d2dced2b1d..00000000000
--- a/caddyhttp/httpserver/siteconfig.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package httpserver
-
-import (
- "time"
-
- "github.com/mholt/caddy/caddytls"
-)
-
-// SiteConfig contains information about a site
-// (also known as a virtual host).
-type SiteConfig struct {
- // The address of the site
- Addr Address
-
- // The hostname to bind listener to;
- // defaults to Addr.Host
- ListenHost string
-
- // TLS configuration
- TLS *caddytls.Config
-
- // Uncompiled middleware stack
- middleware []Middleware
-
- // Compiled middleware stack
- middlewareChain Handler
-
- // listener middleware stack
- listenerMiddleware []ListenerMiddleware
-
- // Directory from which to serve files
- Root string
-
- // A list of files to hide (for example, the
- // source Caddyfile). TODO: Enforcing this
- // should be centralized, for example, a
- // standardized way of loading files from disk
- // for a request.
- HiddenFiles []string
-
- // Max request's header/body size
- Limits Limits
-
- // The path to the Caddyfile used to generate this site config
- originCaddyfile string
-
- // These timeout values are used, in conjunction with other
- // site configs on the same server instance, to set the
- // respective timeout values on the http.Server that
- // is created. Sensible values will mitigate slowloris
- // attacks and overcome faulty networks, while still
- // preserving functionality needed for proxying,
- // websockets, etc.
- Timeouts Timeouts
-
- // If true, any requests not matching other site definitions
- // may be served by this site.
- FallbackSite bool
-}
-
-// Timeouts specify various timeouts for a server to use.
-// If the assocated bool field is true, then the duration
-// value should be treated literally (i.e. a zero-value
-// duration would mean "no timeout"). If false, the duration
-// was left unset, so a zero-value duration would mean to
-// use a default value (even if default is non-zero).
-type Timeouts struct {
- ReadTimeout time.Duration
- ReadTimeoutSet bool
- ReadHeaderTimeout time.Duration
- ReadHeaderTimeoutSet bool
- WriteTimeout time.Duration
- WriteTimeoutSet bool
- IdleTimeout time.Duration
- IdleTimeoutSet bool
-}
-
-// Limits specify size limit of request's header and body.
-type Limits struct {
- MaxRequestHeaderSize int64
- MaxRequestBodySizes []PathLimit
-}
-
-// PathLimit is a mapping from a site's path to its corresponding
-// maximum request body size (in bytes)
-type PathLimit struct {
- Path string
- Limit int64
-}
-
-// AddMiddleware adds a middleware to a site's middleware stack.
-func (s *SiteConfig) AddMiddleware(m Middleware) {
- s.middleware = append(s.middleware, m)
-}
-
-// AddListenerMiddleware adds a listener middleware to a site's listenerMiddleware stack.
-func (s *SiteConfig) AddListenerMiddleware(l ListenerMiddleware) {
- s.listenerMiddleware = append(s.listenerMiddleware, l)
-}
-
-// TLSConfig returns s.TLS.
-func (s SiteConfig) TLSConfig() *caddytls.Config {
- return s.TLS
-}
-
-// Host returns s.Addr.Host.
-func (s SiteConfig) Host() string {
- return s.Addr.Host
-}
-
-// Port returns s.Addr.Port.
-func (s SiteConfig) Port() string {
- return s.Addr.Port
-}
-
-// Middleware returns s.middleware (useful for tests).
-func (s SiteConfig) Middleware() []Middleware {
- return s.middleware
-}
-
-// ListenerMiddleware returns s.listenerMiddleware
-func (s SiteConfig) ListenerMiddleware() []ListenerMiddleware {
- return s.listenerMiddleware
-}
diff --git a/caddyhttp/httpserver/tplcontext.go b/caddyhttp/httpserver/tplcontext.go
deleted file mode 100644
index 92256ebf818..00000000000
--- a/caddyhttp/httpserver/tplcontext.go
+++ /dev/null
@@ -1,446 +0,0 @@
-package httpserver
-
-import (
- "bytes"
- "crypto/rand"
- "fmt"
- "io/ioutil"
- mathrand "math/rand"
- "net"
- "net/http"
- "net/url"
- "path"
- "strings"
- "sync"
- "text/template"
- "time"
-
- "os"
-
- "github.com/russross/blackfriday"
-)
-
-// This file contains the context and functions available for
-// use in the templates.
-
-// Context is the context with which Caddy templates are executed.
-type Context struct {
- Root http.FileSystem
- Req *http.Request
- URL *url.URL
- Args []interface{} // defined by arguments to .Include
-
- // just used for adding preload links for server push
- responseHeader http.Header
-}
-
-// NewContextWithHeader creates a context with given response header.
-//
-// To plugin developer:
-// The returned context's exported fileds remain empty,
-// you should then initialize them if you want.
-func NewContextWithHeader(rh http.Header) Context {
- return Context{
- responseHeader: rh,
- }
-}
-
-// Include returns the contents of filename relative to the site root.
-func (c Context) Include(filename string, args ...interface{}) (string, error) {
- c.Args = args
- return ContextInclude(filename, c, c.Root)
-}
-
-// Now returns the current timestamp in the specified format.
-func (c Context) Now(format string) string {
- return time.Now().Format(format)
-}
-
-// NowDate returns the current date/time that can be used
-// in other time functions.
-func (c Context) NowDate() time.Time {
- return time.Now()
-}
-
-// Cookie gets the value of a cookie with name name.
-func (c Context) Cookie(name string) string {
- cookies := c.Req.Cookies()
- for _, cookie := range cookies {
- if cookie.Name == name {
- return cookie.Value
- }
- }
- return ""
-}
-
-// Header gets the value of a request header with field name.
-func (c Context) Header(name string) string {
- return c.Req.Header.Get(name)
-}
-
-// Hostname gets the (remote) hostname of the client making the request.
-func (c Context) Hostname() string {
- ip := c.IP()
-
- hostnameList, err := net.LookupAddr(ip)
- if err != nil || len(hostnameList) == 0 {
- return c.Req.RemoteAddr
- }
-
- return hostnameList[0]
-}
-
-// Env gets a map of the environment variables.
-func (c Context) Env() map[string]string {
- osEnv := os.Environ()
- envVars := make(map[string]string, len(osEnv))
- for _, env := range osEnv {
- data := strings.SplitN(env, "=", 2)
- if len(data) == 2 && len(data[0]) > 0 {
- envVars[data[0]] = data[1]
- }
- }
- return envVars
-}
-
-// IP gets the (remote) IP address of the client making the request.
-func (c Context) IP() string {
- ip, _, err := net.SplitHostPort(c.Req.RemoteAddr)
- if err != nil {
- return c.Req.RemoteAddr
- }
- return ip
-}
-
-// To mock the net.InterfaceAddrs from the test.
-var networkInterfacesFn = net.InterfaceAddrs
-
-// ServerIP gets the (local) IP address of the server.
-// TODO: The bind directive should be honored in this method (see PR #1474).
-func (c Context) ServerIP() string {
- addrs, err := networkInterfacesFn()
- if err != nil {
- return ""
- }
-
- for _, address := range addrs {
- // Validate the address and check if it's not a loopback
- if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
- if ipnet.IP.To4() != nil || ipnet.IP.To16() != nil {
- return ipnet.IP.String()
- }
- }
- }
-
- return ""
-}
-
-// URI returns the raw, unprocessed request URI (including query
-// string and hash) obtained directly from the Request-Line of
-// the HTTP request.
-func (c Context) URI() string {
- return c.Req.RequestURI
-}
-
-// Host returns the hostname portion of the Host header
-// from the HTTP request.
-func (c Context) Host() (string, error) {
- host, _, err := net.SplitHostPort(c.Req.Host)
- if err != nil {
- if !strings.Contains(c.Req.Host, ":") {
- // common with sites served on the default port 80
- return c.Req.Host, nil
- }
- return "", err
- }
- return host, nil
-}
-
-// Port returns the port portion of the Host header if specified.
-func (c Context) Port() (string, error) {
- _, port, err := net.SplitHostPort(c.Req.Host)
- if err != nil {
- if !strings.Contains(c.Req.Host, ":") {
- // common with sites served on the default port 80
- return HTTPPort, nil
- }
- return "", err
- }
- return port, nil
-}
-
-// Method returns the method (GET, POST, etc.) of the request.
-func (c Context) Method() string {
- return c.Req.Method
-}
-
-// PathMatches returns true if the path portion of the request
-// URL matches pattern.
-func (c Context) PathMatches(pattern string) bool {
- return Path(c.Req.URL.Path).Matches(pattern)
-}
-
-// Truncate truncates the input string to the given length.
-// If length is negative, it returns that many characters
-// starting from the end of the string. If the absolute value
-// of length is greater than len(input), the whole input is
-// returned.
-func (c Context) Truncate(input string, length int) string {
- if length < 0 && len(input)+length > 0 {
- return input[len(input)+length:]
- }
- if length >= 0 && len(input) > length {
- return input[:length]
- }
- return input
-}
-
-// StripHTML returns s without HTML tags. It is fairly naive
-// but works with most valid HTML inputs.
-func (c Context) StripHTML(s string) string {
- var buf bytes.Buffer
- var inTag, inQuotes bool
- var tagStart int
- for i, ch := range s {
- if inTag {
- if ch == '>' && !inQuotes {
- inTag = false
- } else if ch == '<' && !inQuotes {
- // false start
- buf.WriteString(s[tagStart:i])
- tagStart = i
- } else if ch == '"' {
- inQuotes = !inQuotes
- }
- continue
- }
- if ch == '<' {
- inTag = true
- tagStart = i
- continue
- }
- buf.WriteRune(ch)
- }
- if inTag {
- // false start
- buf.WriteString(s[tagStart:])
- }
- return buf.String()
-}
-
-// Ext returns the suffix beginning at the final dot in the final
-// slash-separated element of the pathStr (or in other words, the
-// file extension).
-func (c Context) Ext(pathStr string) string {
- return path.Ext(pathStr)
-}
-
-// StripExt returns the input string without the extension,
-// which is the suffix starting with the final '.' character
-// but not before the final path separator ('/') character.
-// If there is no extension, the whole input is returned.
-func (c Context) StripExt(path string) string {
- for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- {
- if path[i] == '.' {
- return path[:i]
- }
- }
- return path
-}
-
-// Replace replaces instances of find in input with replacement.
-func (c Context) Replace(input, find, replacement string) string {
- return strings.Replace(input, find, replacement, -1)
-}
-
-// Markdown returns the HTML contents of the markdown contained in filename
-// (relative to the site root).
-func (c Context) Markdown(filename string) (string, error) {
- body, err := c.Include(filename)
- if err != nil {
- return "", err
- }
- renderer := blackfriday.HtmlRenderer(0, "", "")
- extns := 0
- extns |= blackfriday.EXTENSION_TABLES
- extns |= blackfriday.EXTENSION_FENCED_CODE
- extns |= blackfriday.EXTENSION_STRIKETHROUGH
- extns |= blackfriday.EXTENSION_DEFINITION_LISTS
- markdown := blackfriday.Markdown([]byte(body), renderer, extns)
-
- return string(markdown), nil
-}
-
-// ContextInclude opens filename using fs and executes a template with the context ctx.
-// This does the same thing that Context.Include() does, but with the ability to provide
-// your own context so that the included files can have access to additional fields your
-// type may provide. You can embed Context in your type, then override its Include method
-// to call this function with ctx being the instance of your type, and fs being Context.Root.
-func ContextInclude(filename string, ctx interface{}, fs http.FileSystem) (string, error) {
- file, err := fs.Open(filename)
- if err != nil {
- return "", err
- }
- defer file.Close()
-
- body, err := ioutil.ReadAll(file)
- if err != nil {
- return "", err
- }
-
- tpl, err := template.New(filename).Funcs(TemplateFuncs).Parse(string(body))
- if err != nil {
- return "", err
- }
-
- buf := includeBufs.Get().(*bytes.Buffer)
- buf.Reset()
- defer includeBufs.Put(buf)
- err = tpl.Execute(buf, ctx)
- if err != nil {
- return "", err
- }
-
- return buf.String(), nil
-}
-
-// ToLower will convert the given string to lower case.
-func (c Context) ToLower(s string) string {
- return strings.ToLower(s)
-}
-
-// ToUpper will convert the given string to upper case.
-func (c Context) ToUpper(s string) string {
- return strings.ToUpper(s)
-}
-
-// Split is a pass-through to strings.Split. It will split the first argument at each instance of the separator and return a slice of strings.
-func (c Context) Split(s string, sep string) []string {
- return strings.Split(s, sep)
-}
-
-// Join is a pass-through to strings.Join. It will join the first argument slice with the separator in the second argument and return the result.
-func (c Context) Join(a []string, sep string) string {
- return strings.Join(a, sep)
-}
-
-// Slice will convert the given arguments into a slice.
-func (c Context) Slice(elems ...interface{}) []interface{} {
- return elems
-}
-
-// Map will convert the arguments into a map. It expects alternating string keys and values. This is useful for building more complicated data structures
-// if you are using subtemplates or things like that.
-func (c Context) Map(values ...interface{}) (map[string]interface{}, error) {
- if len(values)%2 != 0 {
- return nil, fmt.Errorf("Map expects an even number of arguments")
- }
- dict := make(map[string]interface{}, len(values)/2)
- for i := 0; i < len(values); i += 2 {
- key, ok := values[i].(string)
- if !ok {
- return nil, fmt.Errorf("Map keys must be strings")
- }
- dict[key] = values[i+1]
- }
- return dict, nil
-}
-
-// Files reads and returns a slice of names from the given directory
-// relative to the root of Context c.
-func (c Context) Files(name string) ([]string, error) {
- dir, err := c.Root.Open(path.Clean(name))
- if err != nil {
- return nil, err
- }
- defer dir.Close()
-
- stat, err := dir.Stat()
- if err != nil {
- return nil, err
- }
- if !stat.IsDir() {
- return nil, fmt.Errorf("%v is not a directory", name)
- }
-
- dirInfo, err := dir.Readdir(0)
- if err != nil {
- return nil, err
- }
-
- names := make([]string, len(dirInfo))
- for i, fileInfo := range dirInfo {
- names[i] = fileInfo.Name()
- }
-
- return names, nil
-}
-
-// IsMITM returns true if it seems likely that the TLS connection
-// is being intercepted.
-func (c Context) IsMITM() bool {
- if val, ok := c.Req.Context().Value(MitmCtxKey).(bool); ok {
- return val
- }
- return false
-}
-
-// RandomString generates a random string of random length given
-// length bounds. Thanks to http://stackoverflow.com/a/35615565/1048862
-// for the clever technique that is fairly fast, secure, and maintains
-// proper distributions over the dictionary.
-func (c Context) RandomString(minLen, maxLen int) string {
- const (
- letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
- letterIdxBits = 6 // 6 bits to represent 64 possibilities (indexes)
- letterIdxMask = 1<\n
str1
\n
str2
\n\n",
- },
- }
-
- for i, test := range tests {
- testPrefix := getTestPrefix(i)
-
- // WriteFile truncates the contentt
- err := ioutil.WriteFile(absInFilePath, []byte(test.fileContent), os.ModePerm)
- if err != nil {
- t.Fatal(testPrefix+"Failed to create test file. Error was: %v", err)
- }
-
- content, _ := context.Markdown(inputFilename)
- if content != test.expectedContent {
- t.Errorf(testPrefix+"Expected content [%s] but found [%s]. Input file was: %s", test.expectedContent, content, inputFilename)
- }
- }
-}
-
-func TestCookie(t *testing.T) {
-
- tests := []struct {
- cookie *http.Cookie
- cookieName string
- expectedValue string
- }{
- // Test 0 - happy path
- {
- cookie: &http.Cookie{Name: "cookieName", Value: "cookieValue"},
- cookieName: "cookieName",
- expectedValue: "cookieValue",
- },
- // Test 1 - try to get a non-existing cookie
- {
- cookie: &http.Cookie{Name: "cookieName", Value: "cookieValue"},
- cookieName: "notExisting",
- expectedValue: "",
- },
- // Test 2 - partial name match
- {
- cookie: &http.Cookie{Name: "cookie", Value: "cookieValue"},
- cookieName: "cook",
- expectedValue: "",
- },
- // Test 3 - cookie with optional fields
- {
- cookie: &http.Cookie{Name: "cookie", Value: "cookieValue", Path: "/path", Domain: "https://localhost", Expires: (time.Now().Add(10 * time.Minute)), MaxAge: 120},
- cookieName: "cookie",
- expectedValue: "cookieValue",
- },
- }
-
- for i, test := range tests {
- testPrefix := getTestPrefix(i)
-
- // reinitialize the context for each test
- context := getContextOrFail(t)
-
- context.Req.AddCookie(test.cookie)
-
- actualCookieVal := context.Cookie(test.cookieName)
-
- if actualCookieVal != test.expectedValue {
- t.Errorf(testPrefix+"Expected cookie value [%s] but found [%s] for cookie with name %s", test.expectedValue, actualCookieVal, test.cookieName)
- }
- }
-}
-
-func TestCookieMultipleCookies(t *testing.T) {
- context := getContextOrFail(t)
-
- cookieNameBase, cookieValueBase := "cookieName", "cookieValue"
-
- // make sure that there's no state and multiple requests for different cookies return the correct result
- for i := 0; i < 10; i++ {
- context.Req.AddCookie(&http.Cookie{Name: fmt.Sprintf("%s%d", cookieNameBase, i), Value: fmt.Sprintf("%s%d", cookieValueBase, i)})
- }
-
- for i := 0; i < 10; i++ {
- expectedCookieVal := fmt.Sprintf("%s%d", cookieValueBase, i)
- actualCookieVal := context.Cookie(fmt.Sprintf("%s%d", cookieNameBase, i))
- if actualCookieVal != expectedCookieVal {
- t.Fatalf("Expected cookie value %s, found %s", expectedCookieVal, actualCookieVal)
- }
- }
-}
-
-func TestHeader(t *testing.T) {
- context := getContextOrFail(t)
-
- headerKey, headerVal := "Header1", "HeaderVal1"
- context.Req.Header.Add(headerKey, headerVal)
-
- actualHeaderVal := context.Header(headerKey)
- if actualHeaderVal != headerVal {
- t.Errorf("Expected header %s, found %s", headerVal, actualHeaderVal)
- }
-
- missingHeaderVal := context.Header("not-existing")
- if missingHeaderVal != "" {
- t.Errorf("Expected empty header value, found %s", missingHeaderVal)
- }
-}
-
-func TestHostname(t *testing.T) {
- context := getContextOrFail(t)
-
- tests := []struct {
- inputRemoteAddr string
- expectedHostname string
- }{
- // TODO(mholt): Fix these tests, they're not portable. i.e. my resolver
- // returns "fwdr-8.fwdr-8.fwdr-8.fwdr-8." instead of these google ones.
- // Test 0 - ipv4 with port
- // {"8.8.8.8:1111", "google-public-dns-a.google.com."},
- // // Test 1 - ipv4 without port
- // {"8.8.8.8", "google-public-dns-a.google.com."},
- // // Test 2 - ipv6 with port
- // {"[2001:4860:4860::8888]:11", "google-public-dns-a.google.com."},
- // // Test 3 - ipv6 without port and brackets
- // {"2001:4860:4860::8888", "google-public-dns-a.google.com."},
- // Test 4 - no hostname available
- {"1.1.1.1", "1.1.1.1"},
- }
-
- for i, test := range tests {
- testPrefix := getTestPrefix(i)
-
- context.Req.RemoteAddr = test.inputRemoteAddr
- actualHostname := context.Hostname()
-
- if actualHostname != test.expectedHostname {
- t.Errorf(testPrefix+"Expected hostname %s, found %s", test.expectedHostname, actualHostname)
- }
- }
-}
-
-func TestEnv(t *testing.T) {
- context := getContextOrFail(t)
-
- name := "ENV_TEST_NAME"
- testValue := "TEST_VALUE"
- os.Setenv(name, testValue)
-
- notExisting := "ENV_TEST_NOT_EXISTING"
- os.Unsetenv(notExisting)
-
- invalidName := "ENV_TEST_INVALID_NAME"
- os.Setenv("="+invalidName, testValue)
-
- env := context.Env()
- if value := env[name]; value != testValue {
- t.Errorf("Expected env-variable %s value '%s', found '%s'",
- name, testValue, value)
- }
-
- if value, ok := env[notExisting]; ok {
- t.Errorf("Expected empty env-variable %s, found '%s'",
- notExisting, value)
- }
-
- for k, v := range env {
- if strings.Contains(k, invalidName) {
- t.Errorf("Expected invalid name not to be included in Env %s, found in key '%s'", invalidName, k)
- }
- if strings.Contains(v, invalidName) {
- t.Errorf("Expected invalid name not be be included in Env %s, found in value '%s'", invalidName, v)
- }
- }
-
- os.Unsetenv("=" + invalidName)
-}
-
-func TestIP(t *testing.T) {
- context := getContextOrFail(t)
-
- tests := []struct {
- inputRemoteAddr string
- expectedIP string
- }{
- // Test 0 - ipv4 with port
- {"1.1.1.1:1111", "1.1.1.1"},
- // Test 1 - ipv4 without port
- {"1.1.1.1", "1.1.1.1"},
- // Test 2 - ipv6 with port
- {"[::1]:11", "::1"},
- // Test 3 - ipv6 without port and brackets
- {"[2001:db8:a0b:12f0::1]", "[2001:db8:a0b:12f0::1]"},
- // Test 4 - ipv6 with zone and port
- {`[fe80:1::3%eth0]:44`, `fe80:1::3%eth0`},
- }
-
- for i, test := range tests {
- testPrefix := getTestPrefix(i)
-
- context.Req.RemoteAddr = test.inputRemoteAddr
- actualIP := context.IP()
-
- if actualIP != test.expectedIP {
- t.Errorf(testPrefix+"Expected IP %s, found %s", test.expectedIP, actualIP)
- }
- }
-}
-
-type myIP string
-
-func (ip myIP) mockInterfaces() ([]net.Addr, error) {
- a := net.ParseIP(string(ip))
-
- return []net.Addr{
- &net.IPNet{IP: a, Mask: nil},
- }, nil
-}
-
-func TestServerIP(t *testing.T) {
- context := getContextOrFail(t)
-
- tests := []string{
- // Test 0 - ipv4
- "1.1.1.1",
- // Test 1 - ipv6
- "2001:db8:a0b:12f0::1",
- }
-
- for i, expectedIP := range tests {
- testPrefix := getTestPrefix(i)
-
- // Mock the network interface
- ip := myIP(expectedIP)
- networkInterfacesFn = ip.mockInterfaces
- defer func() {
- networkInterfacesFn = net.InterfaceAddrs
- }()
-
- actualIP := context.ServerIP()
-
- if actualIP != expectedIP {
- t.Errorf("%sExpected IP \"%s\", found \"%s\".", testPrefix, expectedIP, actualIP)
- }
- }
-}
-
-func TestURL(t *testing.T) {
- context := getContextOrFail(t)
-
- inputURL := "http://localhost"
- context.Req.RequestURI = inputURL
-
- if inputURL != context.URI() {
- t.Errorf("Expected url %s, found %s", inputURL, context.URI())
- }
-}
-
-func TestHost(t *testing.T) {
- tests := []struct {
- input string
- expectedHost string
- shouldErr bool
- }{
- {
- input: "localhost:123",
- expectedHost: "localhost",
- shouldErr: false,
- },
- {
- input: "localhost",
- expectedHost: "localhost",
- shouldErr: false,
- },
- {
- input: "[::]",
- expectedHost: "",
- shouldErr: true,
- },
- }
-
- for _, test := range tests {
- testHostOrPort(t, true, test.input, test.expectedHost, test.shouldErr)
- }
-}
-
-func TestPort(t *testing.T) {
- tests := []struct {
- input string
- expectedPort string
- shouldErr bool
- }{
- {
- input: "localhost:123",
- expectedPort: "123",
- shouldErr: false,
- },
- {
- input: "localhost",
- expectedPort: "80", // assuming 80 is the default port
- shouldErr: false,
- },
- {
- input: ":8080",
- expectedPort: "8080",
- shouldErr: false,
- },
- {
- input: "[::]",
- expectedPort: "",
- shouldErr: true,
- },
- }
-
- for _, test := range tests {
- testHostOrPort(t, false, test.input, test.expectedPort, test.shouldErr)
- }
-}
-
-func testHostOrPort(t *testing.T, isTestingHost bool, input, expectedResult string, shouldErr bool) {
- context := getContextOrFail(t)
-
- context.Req.Host = input
- var actualResult, testedObject string
- var err error
-
- if isTestingHost {
- actualResult, err = context.Host()
- testedObject = "host"
- } else {
- actualResult, err = context.Port()
- testedObject = "port"
- }
-
- if shouldErr && err == nil {
- t.Errorf("Expected error, found nil!")
- return
- }
-
- if !shouldErr && err != nil {
- t.Errorf("Expected no error, found %s", err)
- return
- }
-
- if actualResult != expectedResult {
- t.Errorf("Expected %s %s, found %s", testedObject, expectedResult, actualResult)
- }
-}
-
-func TestMethod(t *testing.T) {
- context := getContextOrFail(t)
-
- method := "POST"
- context.Req.Method = method
-
- if method != context.Method() {
- t.Errorf("Expected method %s, found %s", method, context.Method())
- }
-
-}
-
-func TestContextPathMatches(t *testing.T) {
- context := getContextOrFail(t)
-
- tests := []struct {
- urlStr string
- pattern string
- shouldMatch bool
- }{
- // Test 0
- {
- urlStr: "http://localhost/",
- pattern: "",
- shouldMatch: true,
- },
- // Test 1
- {
- urlStr: "http://localhost",
- pattern: "",
- shouldMatch: true,
- },
- // Test 1
- {
- urlStr: "http://localhost/",
- pattern: "/",
- shouldMatch: true,
- },
- // Test 3
- {
- urlStr: "http://localhost/?param=val",
- pattern: "/",
- shouldMatch: true,
- },
- // Test 4
- {
- urlStr: "http://localhost/dir1/dir2",
- pattern: "/dir2",
- shouldMatch: false,
- },
- // Test 5
- {
- urlStr: "http://localhost/dir1/dir2",
- pattern: "/dir1",
- shouldMatch: true,
- },
- // Test 6
- {
- urlStr: "http://localhost:444/dir1/dir2",
- pattern: "/dir1",
- shouldMatch: true,
- },
- // Test 7
- {
- urlStr: "http://localhost/dir1/dir2",
- pattern: "*/dir2",
- shouldMatch: false,
- },
- }
-
- for i, test := range tests {
- testPrefix := getTestPrefix(i)
- var err error
- context.Req.URL, err = url.Parse(test.urlStr)
- if err != nil {
- t.Fatalf("Failed to prepare test URL from string %s! Error was: %s", test.urlStr, err)
- }
-
- matches := context.PathMatches(test.pattern)
- if matches != test.shouldMatch {
- t.Errorf(testPrefix+"Expected and actual result differ: expected to match [%t], actual matches [%t]", test.shouldMatch, matches)
- }
- }
-}
-
-func TestTruncate(t *testing.T) {
- context := getContextOrFail(t)
- tests := []struct {
- inputString string
- inputLength int
- expected string
- }{
- // Test 0 - small length
- {
- inputString: "string",
- inputLength: 1,
- expected: "s",
- },
- // Test 1 - exact length
- {
- inputString: "string",
- inputLength: 6,
- expected: "string",
- },
- // Test 2 - bigger length
- {
- inputString: "string",
- inputLength: 10,
- expected: "string",
- },
- // Test 3 - zero length
- {
- inputString: "string",
- inputLength: 0,
- expected: "",
- },
- // Test 4 - negative, smaller length
- {
- inputString: "string",
- inputLength: -5,
- expected: "tring",
- },
- // Test 5 - negative, exact length
- {
- inputString: "string",
- inputLength: -6,
- expected: "string",
- },
- // Test 6 - negative, bigger length
- {
- inputString: "string",
- inputLength: -7,
- expected: "string",
- },
- }
-
- for i, test := range tests {
- actual := context.Truncate(test.inputString, test.inputLength)
- if actual != test.expected {
- t.Errorf(getTestPrefix(i)+"Expected '%s', found '%s'. Input was Truncate(%q, %d)", test.expected, actual, test.inputString, test.inputLength)
- }
- }
-}
-
-func TestStripHTML(t *testing.T) {
- context := getContextOrFail(t)
- tests := []struct {
- input string
- expected string
- }{
- // Test 0 - no tags
- {
- input: `h1`,
- expected: `h1`,
- },
- // Test 1 - happy path
- {
- input: `
h1
`,
- expected: `h1`,
- },
- // Test 2 - tag in quotes
- {
- input: `
\n", get(); expect != got {
- t.Fatalf("Expected body:\n%q\nbut got:\n%q", expect, got)
- }
-
-}
diff --git a/caddyhttp/markdown/metadata/metadata.go b/caddyhttp/markdown/metadata/metadata.go
deleted file mode 100644
index a5e1c370685..00000000000
--- a/caddyhttp/markdown/metadata/metadata.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package metadata
-
-import (
- "bufio"
- "bytes"
- "time"
-)
-
-var (
- // Date format YYYY-MM-DD HH:MM:SS or YYYY-MM-DD
- timeLayout = []string{
- `2006-01-02 15:04:05-0700`,
- `2006-01-02 15:04:05`,
- `2006-01-02`,
- }
-)
-
-// Metadata stores a page's metadata
-type Metadata struct {
- // Page title
- Title string
-
- // Page template
- Template string
-
- // Publish date
- Date time.Time
-
- // Variables to be used with Template
- Variables map[string]interface{}
-}
-
-// NewMetadata returns a new Metadata struct, loaded with the given map
-func NewMetadata(parsedMap map[string]interface{}) Metadata {
- md := Metadata{
- Variables: make(map[string]interface{}),
- }
- md.load(parsedMap)
-
- return md
-}
-
-// load loads parsed values in parsedMap into Metadata
-func (m *Metadata) load(parsedMap map[string]interface{}) {
-
- // Pull top level things out
- if title, ok := parsedMap["title"]; ok {
- m.Title, _ = title.(string)
- }
- if template, ok := parsedMap["template"]; ok {
- m.Template, _ = template.(string)
- }
- if date, ok := parsedMap["date"].(string); ok {
- for _, layout := range timeLayout {
- if t, err := time.Parse(layout, date); err == nil {
- m.Date = t
- break
- }
- }
- }
-
- m.Variables = parsedMap
-}
-
-// Parser is a an interface that must be satisfied by each parser
-type Parser interface {
- // Initialize a parser
- Init(b *bytes.Buffer) bool
-
- // Type of metadata
- Type() string
-
- // Parsed metadata.
- Metadata() Metadata
-
- // Raw markdown.
- Markdown() []byte
-}
-
-// GetParser returns a parser for the given data
-func GetParser(buf []byte) Parser {
- for _, p := range parsers() {
- b := bytes.NewBuffer(buf)
- if p.Init(b) {
- return p
- }
- }
-
- return nil
-}
-
-// parsers returns all available parsers
-func parsers() []Parser {
- return []Parser{
- &TOMLParser{},
- &YAMLParser{},
- &JSONParser{},
-
- // This one must be last
- &NoneParser{},
- }
-}
-
-// Split out prefixed/suffixed metadata with given delimiter
-func splitBuffer(b *bytes.Buffer, delim string) (*bytes.Buffer, *bytes.Buffer) {
- scanner := bufio.NewScanner(b)
-
- // Read and check first line
- if !scanner.Scan() {
- return nil, nil
- }
- if string(bytes.TrimSpace(scanner.Bytes())) != delim {
- return nil, nil
- }
-
- // Accumulate metadata, until delimiter
- meta := bytes.NewBuffer(nil)
- for scanner.Scan() {
- if string(bytes.TrimSpace(scanner.Bytes())) == delim {
- break
- }
- if _, err := meta.Write(scanner.Bytes()); err != nil {
- return nil, nil
- }
- if _, err := meta.WriteRune('\n'); err != nil {
- return nil, nil
- }
- }
- // Make sure we saw closing delimiter
- if string(bytes.TrimSpace(scanner.Bytes())) != delim {
- return nil, nil
- }
-
- // The rest is markdown
- markdown := new(bytes.Buffer)
- for scanner.Scan() {
- if _, err := markdown.Write(scanner.Bytes()); err != nil {
- return nil, nil
- }
- if _, err := markdown.WriteRune('\n'); err != nil {
- return nil, nil
- }
- }
-
- return meta, markdown
-}
diff --git a/caddyhttp/markdown/metadata/metadata_json.go b/caddyhttp/markdown/metadata/metadata_json.go
deleted file mode 100644
index 61343f287ac..00000000000
--- a/caddyhttp/markdown/metadata/metadata_json.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package metadata
-
-import (
- "bytes"
- "encoding/json"
-)
-
-// JSONParser is the MetadataParser for JSON
-type JSONParser struct {
- metadata Metadata
- markdown *bytes.Buffer
-}
-
-// Type returns the kind of metadata parser implemented by this struct.
-func (j *JSONParser) Type() string {
- return "JSON"
-}
-
-// Init prepares the metadata metadata/markdown file and parses it
-func (j *JSONParser) Init(b *bytes.Buffer) bool {
- m := make(map[string]interface{})
-
- err := json.Unmarshal(b.Bytes(), &m)
- if err != nil {
- var offset int
-
- jerr, ok := err.(*json.SyntaxError)
- if !ok {
- return false
- }
-
- offset = int(jerr.Offset)
-
- m = make(map[string]interface{})
- err = json.Unmarshal(b.Next(offset-1), &m)
- if err != nil {
- return false
- }
- }
-
- j.metadata = NewMetadata(m)
- j.markdown = bytes.NewBuffer(b.Bytes())
-
- return true
-}
-
-// Metadata returns parsed metadata. It should be called
-// only after a call to Parse returns without error.
-func (j *JSONParser) Metadata() Metadata {
- return j.metadata
-}
-
-// Markdown returns the markdown text. It should be called only after a call to Parse returns without error.
-func (j *JSONParser) Markdown() []byte {
- return j.markdown.Bytes()
-}
diff --git a/caddyhttp/markdown/metadata/metadata_none.go b/caddyhttp/markdown/metadata/metadata_none.go
deleted file mode 100644
index e87bfb43e69..00000000000
--- a/caddyhttp/markdown/metadata/metadata_none.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package metadata
-
-import (
- "bytes"
-)
-
-// NoneParser is the parser for plaintext markdown with no metadata.
-type NoneParser struct {
- metadata Metadata
- markdown *bytes.Buffer
-}
-
-// Type returns the kind of parser this struct is.
-func (n *NoneParser) Type() string {
- return "None"
-}
-
-// Init prepases and parses the metadata and markdown file
-func (n *NoneParser) Init(b *bytes.Buffer) bool {
- m := make(map[string]interface{})
- n.metadata = NewMetadata(m)
- n.markdown = bytes.NewBuffer(b.Bytes())
-
- return true
-}
-
-// Parse the metadata
-func (n *NoneParser) Parse(b []byte) ([]byte, error) {
- return nil, nil
-}
-
-// Metadata returns parsed metadata. It should be called
-// only after a call to Parse returns without error.
-func (n *NoneParser) Metadata() Metadata {
- return n.metadata
-}
-
-// Markdown returns parsed markdown. It should be called
-// only after a call to Parse returns without error.
-func (n *NoneParser) Markdown() []byte {
- return n.markdown.Bytes()
-}
diff --git a/caddyhttp/markdown/metadata/metadata_test.go b/caddyhttp/markdown/metadata/metadata_test.go
deleted file mode 100644
index a5663d55f8c..00000000000
--- a/caddyhttp/markdown/metadata/metadata_test.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package metadata
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
-)
-
-func check(t *testing.T, err error) {
- if err != nil {
- t.Fatal(err)
- }
-}
-
-var TOML = [5]string{`
-title = "A title"
-template = "default"
-name = "value"
-positive = true
-negative = false
-number = 1410
-float = 1410.07
-`,
- `+++
-title = "A title"
-template = "default"
-name = "value"
-positive = true
-negative = false
-number = 1410
-float = 1410.07
-+++
-Page content
- `,
- `+++
-title = "A title"
-template = "default"
-name = "value"
-positive = true
-negative = false
-number = 1410
-float = 1410.07
- `,
- `title = "A title" template = "default" [variables] name = "value"`,
- `+++
-title = "A title"
-template = "default"
-name = "value"
-positive = true
-negative = false
-number = 1410
-float = 1410.07
-+++
-`,
-}
-
-var YAML = [5]string{`
-title : A title
-template : default
-name : value
-positive : true
-negative : false
-number : 1410
-float : 1410.07
-`,
- `---
-title : A title
-template : default
-name : value
-positive : true
-negative : false
-number : 1410
-float : 1410.07
----
- Page content
- `,
- `---
-title : A title
-template : default
-name : value
-number : 1410
-float : 1410.07
- `,
- `title : A title template : default variables : name : value : positive : true : negative : false`,
- `---
-title : A title
-template : default
-name : value
-positive : true
-negative : false
-number : 1410
-float : 1410.07
----
-`,
-}
-
-var JSON = [5]string{`
- "title" : "A title",
- "template" : "default",
- "name" : "value",
- "positive" : true,
- "negative" : false,
- "number": 1410,
- "float": 1410.07
-`,
- `{
- "title" : "A title",
- "template" : "default",
- "name" : "value",
- "positive" : true,
- "negative" : false,
- "number" : 1410,
- "float": 1410.07
-}
-Page content
- `,
- `
-{
- "title" : "A title",
- "template" : "default",
- "name" : "value",
- "positive" : true,
- "negative" : false,
- "number" : 1410,
- "float": 1410.07
- `,
- `
-{
- "title" :: "A title",
- "template" : "default",
- "name" : "value",
- "positive" : true,
- "negative" : false,
- "number" : 1410,
- "float": 1410.07
-}
- `,
- `{
- "title" : "A title",
- "template" : "default",
- "name" : "value",
- "positive" : true,
- "negative" : false,
- "number" : 1410,
- "float": 1410.07
-}
-`,
-}
-
-func TestParsers(t *testing.T) {
- expected := Metadata{
- Title: "A title",
- Template: "default",
- Variables: map[string]interface{}{
- "name": "value",
- "title": "A title",
- "template": "default",
- "number": 1410,
- "float": 1410.07,
- "positive": true,
- "negative": false,
- },
- }
- compare := func(m Metadata) bool {
- if m.Title != expected.Title {
- return false
- }
- if m.Template != expected.Template {
- return false
- }
- for k, v := range m.Variables {
- if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", expected.Variables[k]) {
- return false
- }
- }
-
- varLenOK := len(m.Variables) == len(expected.Variables)
- return varLenOK
- }
-
- data := []struct {
- parser Parser
- testData [5]string
- name string
- }{
- {&JSONParser{}, JSON, "JSON"},
- {&YAMLParser{}, YAML, "YAML"},
- {&TOMLParser{}, TOML, "TOML"},
- }
-
- for _, v := range data {
- // metadata without identifiers
- if v.parser.Init(bytes.NewBufferString(v.testData[0])) {
- t.Fatalf("Expected error for invalid metadata for %v", v.name)
- }
-
- // metadata with identifiers
- if !v.parser.Init(bytes.NewBufferString(v.testData[1])) {
- t.Fatalf("Metadata failed to initialize, type %v", v.parser.Type())
- }
- md := v.parser.Markdown()
- if !compare(v.parser.Metadata()) {
- t.Fatalf("Expected %v, found %v for %v", expected, v.parser.Metadata(), v.name)
- }
- if "Page content" != strings.TrimSpace(string(md)) {
- t.Fatalf("Expected %v, found %v for %v", "Page content", string(md), v.name)
- }
- // Check that we find the correct metadata parser type
- if p := GetParser([]byte(v.testData[1])); p.Type() != v.name {
- t.Fatalf("Wrong parser found, expected %v, found %v", v.name, p.Type())
- }
-
- // metadata without closing identifier
- if v.parser.Init(bytes.NewBufferString(v.testData[2])) {
- t.Fatalf("Expected error for missing closing identifier for %v parser", v.name)
- }
-
- // invalid metadata
- if v.parser.Init(bytes.NewBufferString(v.testData[3])) {
- t.Fatalf("Expected error for invalid metadata for %v", v.name)
- }
-
- // front matter but no body
- if !v.parser.Init(bytes.NewBufferString(v.testData[4])) {
- t.Fatalf("Unexpected error for valid metadata but no body for %v", v.name)
- }
- }
-}
-
-func TestLargeBody(t *testing.T) {
-
- var JSON = `{
-"template": "chapter"
-}
-
-Mycket olika byggnader har man i de nordiska rikena: pyramidformiga, kilformiga, välvda, runda och fyrkantiga. De pyramidformiga består helt enkelt av träribbor, som upptill löper samman och nedtill bildar en vidare krets; de är avsedda att användas av hantverkarna under sommaren, för att de inte ska plågas av solen, på samma gång som de besväras av rök och eld. De kilformiga husen är i regel försedda med höga tak, för att de täta och tunga snömassorna fortare ska kunna blåsa av och inte tynga ned taken. Dessa är täckta av björknäver, tegel eller kluvet spån av furu - för kådans skull -, gran, ek eller bok; taken på de förmögnas hus däremot med plåtar av koppar eller bly, i likhet med kyrktaken. Valvbyggnaderna uppförs ganska konstnärligt till skydd mot våldsamma vindar och snöfall, görs av sten eller trä, och är avsedda för olika alldagliga viktiga ändamål. Liknande byggnader kan finnas i stormännens gårdar där de används som förvaringsrum för husgeråd och jordbruksredskap. De runda byggnaderna - som för övrigt är de högst sällsynta - används av konstnärer, som vid sitt arbete behöver ett jämnt fördelat ljus från taket. Vanligast är de fyrkantiga husen, vars grova bjälkar är synnerligen väl hopfogade i hörnen - ett sant mästerverk av byggnadskonst; även dessa har fönster högt uppe i taken, för att dagsljuset skall kunna strömma in och ge alla därinne full belysning. Stenhusen har dörröppningar i förhållande till byggnadens storlek, men smala fönstergluggar, som skydd mot den stränga kölden, frosten och snön. Vore de större och vidare, såsom fönstren i Italien, skulle husen i följd av den fint yrande snön, som röres upp av den starka blåsten, precis som dammet av virvelvinden, snart nog fyllas med massor av snö och inte kunna stå emot dess tryck, utan störta samman.
-
- `
- var TOML = `+++
-template = "chapter"
-+++
-
-Mycket olika byggnader har man i de nordiska rikena: pyramidformiga, kilformiga, välvda, runda och fyrkantiga. De pyramidformiga består helt enkelt av träribbor, som upptill löper samman och nedtill bildar en vidare krets; de är avsedda att användas av hantverkarna under sommaren, för att de inte ska plågas av solen, på samma gång som de besväras av rök och eld. De kilformiga husen är i regel försedda med höga tak, för att de täta och tunga snömassorna fortare ska kunna blåsa av och inte tynga ned taken. Dessa är täckta av björknäver, tegel eller kluvet spån av furu - för kådans skull -, gran, ek eller bok; taken på de förmögnas hus däremot med plåtar av koppar eller bly, i likhet med kyrktaken. Valvbyggnaderna uppförs ganska konstnärligt till skydd mot våldsamma vindar och snöfall, görs av sten eller trä, och är avsedda för olika alldagliga viktiga ändamål. Liknande byggnader kan finnas i stormännens gårdar där de används som förvaringsrum för husgeråd och jordbruksredskap. De runda byggnaderna - som för övrigt är de högst sällsynta - används av konstnärer, som vid sitt arbete behöver ett jämnt fördelat ljus från taket. Vanligast är de fyrkantiga husen, vars grova bjälkar är synnerligen väl hopfogade i hörnen - ett sant mästerverk av byggnadskonst; även dessa har fönster högt uppe i taken, för att dagsljuset skall kunna strömma in och ge alla därinne full belysning. Stenhusen har dörröppningar i förhållande till byggnadens storlek, men smala fönstergluggar, som skydd mot den stränga kölden, frosten och snön. Vore de större och vidare, såsom fönstren i Italien, skulle husen i följd av den fint yrande snön, som röres upp av den starka blåsten, precis som dammet av virvelvinden, snart nog fyllas med massor av snö och inte kunna stå emot dess tryck, utan störta samman.
-
- `
- var YAML = `---
-template : chapter
----
-
-Mycket olika byggnader har man i de nordiska rikena: pyramidformiga, kilformiga, välvda, runda och fyrkantiga. De pyramidformiga består helt enkelt av träribbor, som upptill löper samman och nedtill bildar en vidare krets; de är avsedda att användas av hantverkarna under sommaren, för att de inte ska plågas av solen, på samma gång som de besväras av rök och eld. De kilformiga husen är i regel försedda med höga tak, för att de täta och tunga snömassorna fortare ska kunna blåsa av och inte tynga ned taken. Dessa är täckta av björknäver, tegel eller kluvet spån av furu - för kådans skull -, gran, ek eller bok; taken på de förmögnas hus däremot med plåtar av koppar eller bly, i likhet med kyrktaken. Valvbyggnaderna uppförs ganska konstnärligt till skydd mot våldsamma vindar och snöfall, görs av sten eller trä, och är avsedda för olika alldagliga viktiga ändamål. Liknande byggnader kan finnas i stormännens gårdar där de används som förvaringsrum för husgeråd och jordbruksredskap. De runda byggnaderna - som för övrigt är de högst sällsynta - används av konstnärer, som vid sitt arbete behöver ett jämnt fördelat ljus från taket. Vanligast är de fyrkantiga husen, vars grova bjälkar är synnerligen väl hopfogade i hörnen - ett sant mästerverk av byggnadskonst; även dessa har fönster högt uppe i taken, för att dagsljuset skall kunna strömma in och ge alla därinne full belysning. Stenhusen har dörröppningar i förhållande till byggnadens storlek, men smala fönstergluggar, som skydd mot den stränga kölden, frosten och snön. Vore de större och vidare, såsom fönstren i Italien, skulle husen i följd av den fint yrande snön, som röres upp av den starka blåsten, precis som dammet av virvelvinden, snart nog fyllas med massor av snö och inte kunna stå emot dess tryck, utan störta samman.
-
- `
- var NONE = `
-
-Mycket olika byggnader har man i de nordiska rikena: pyramidformiga, kilformiga, välvda, runda och fyrkantiga. De pyramidformiga består helt enkelt av träribbor, som upptill löper samman och nedtill bildar en vidare krets; de är avsedda att användas av hantverkarna under sommaren, för att de inte ska plågas av solen, på samma gång som de besväras av rök och eld. De kilformiga husen är i regel försedda med höga tak, för att de täta och tunga snömassorna fortare ska kunna blåsa av och inte tynga ned taken. Dessa är täckta av björknäver, tegel eller kluvet spån av furu - för kådans skull -, gran, ek eller bok; taken på de förmögnas hus däremot med plåtar av koppar eller bly, i likhet med kyrktaken. Valvbyggnaderna uppförs ganska konstnärligt till skydd mot våldsamma vindar och snöfall, görs av sten eller trä, och är avsedda för olika alldagliga viktiga ändamål. Liknande byggnader kan finnas i stormännens gårdar där de används som förvaringsrum för husgeråd och jordbruksredskap. De runda byggnaderna - som för övrigt är de högst sällsynta - används av konstnärer, som vid sitt arbete behöver ett jämnt fördelat ljus från taket. Vanligast är de fyrkantiga husen, vars grova bjälkar är synnerligen väl hopfogade i hörnen - ett sant mästerverk av byggnadskonst; även dessa har fönster högt uppe i taken, för att dagsljuset skall kunna strömma in och ge alla därinne full belysning. Stenhusen har dörröppningar i förhållande till byggnadens storlek, men smala fönstergluggar, som skydd mot den stränga kölden, frosten och snön. Vore de större och vidare, såsom fönstren i Italien, skulle husen i följd av den fint yrande snön, som röres upp av den starka blåsten, precis som dammet av virvelvinden, snart nog fyllas med massor av snö och inte kunna stå emot dess tryck, utan störta samman.
-
- `
- var expectedBody = `Mycket olika byggnader har man i de nordiska rikena: pyramidformiga, kilformiga, välvda, runda och fyrkantiga. De pyramidformiga består helt enkelt av träribbor, som upptill löper samman och nedtill bildar en vidare krets; de är avsedda att användas av hantverkarna under sommaren, för att de inte ska plågas av solen, på samma gång som de besväras av rök och eld. De kilformiga husen är i regel försedda med höga tak, för att de täta och tunga snömassorna fortare ska kunna blåsa av och inte tynga ned taken. Dessa är täckta av björknäver, tegel eller kluvet spån av furu - för kådans skull -, gran, ek eller bok; taken på de förmögnas hus däremot med plåtar av koppar eller bly, i likhet med kyrktaken. Valvbyggnaderna uppförs ganska konstnärligt till skydd mot våldsamma vindar och snöfall, görs av sten eller trä, och är avsedda för olika alldagliga viktiga ändamål. Liknande byggnader kan finnas i stormännens gårdar där de används som förvaringsrum för husgeråd och jordbruksredskap. De runda byggnaderna - som för övrigt är de högst sällsynta - används av konstnärer, som vid sitt arbete behöver ett jämnt fördelat ljus från taket. Vanligast är de fyrkantiga husen, vars grova bjälkar är synnerligen väl hopfogade i hörnen - ett sant mästerverk av byggnadskonst; även dessa har fönster högt uppe i taken, för att dagsljuset skall kunna strömma in och ge alla därinne full belysning. Stenhusen har dörröppningar i förhållande till byggnadens storlek, men smala fönstergluggar, som skydd mot den stränga kölden, frosten och snön. Vore de större och vidare, såsom fönstren i Italien, skulle husen i följd av den fint yrande snön, som röres upp av den starka blåsten, precis som dammet av virvelvinden, snart nog fyllas med massor av snö och inte kunna stå emot dess tryck, utan störta samman.
-`
-
- data := []struct {
- pType string
- testData string
- }{
- {"JSON", JSON},
- {"TOML", TOML},
- {"YAML", YAML},
- {"None", NONE},
- }
- for _, v := range data {
- p := GetParser([]byte(v.testData))
- if v.pType != p.Type() {
- t.Fatalf("Wrong parser type, expected %v, got %v", v.pType, p.Type())
- }
- md := p.Markdown()
- if strings.TrimSpace(string(md)) != strings.TrimSpace(expectedBody) {
- t.Log("Provided:", v.testData)
- t.Log("Returned:", p.Markdown())
- t.Fatalf("Error, mismatched body in expected type %v, matched type %v", v.pType, p.Type())
- }
- }
-}
diff --git a/caddyhttp/markdown/metadata/metadata_toml.go b/caddyhttp/markdown/metadata/metadata_toml.go
deleted file mode 100644
index 8f8800b2e5a..00000000000
--- a/caddyhttp/markdown/metadata/metadata_toml.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package metadata
-
-import (
- "bytes"
-
- "github.com/naoina/toml"
-)
-
-// TOMLParser is the Parser for TOML
-type TOMLParser struct {
- metadata Metadata
- markdown *bytes.Buffer
-}
-
-// Type returns the kind of parser this struct is.
-func (t *TOMLParser) Type() string {
- return "TOML"
-}
-
-// Init prepares and parses the metadata and markdown file itself
-func (t *TOMLParser) Init(b *bytes.Buffer) bool {
- meta, data := splitBuffer(b, "+++")
- if meta == nil || data == nil {
- return false
- }
- t.markdown = data
-
- m := make(map[string]interface{})
- if err := toml.Unmarshal(meta.Bytes(), &m); err != nil {
- return false
- }
- t.metadata = NewMetadata(m)
-
- return true
-}
-
-// Metadata returns parsed metadata. It should be called
-// only after a call to Parse returns without error.
-func (t *TOMLParser) Metadata() Metadata {
- return t.metadata
-}
-
-// Markdown returns parser markdown. It should be called only after a call to Parse returns without error.
-func (t *TOMLParser) Markdown() []byte {
- return t.markdown.Bytes()
-}
diff --git a/caddyhttp/markdown/metadata/metadata_yaml.go b/caddyhttp/markdown/metadata/metadata_yaml.go
deleted file mode 100644
index f8e9acfd342..00000000000
--- a/caddyhttp/markdown/metadata/metadata_yaml.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package metadata
-
-import (
- "bytes"
-
- "gopkg.in/yaml.v2"
-)
-
-// YAMLParser is the Parser for YAML
-type YAMLParser struct {
- metadata Metadata
- markdown *bytes.Buffer
-}
-
-// Type returns the kind of metadata parser.
-func (y *YAMLParser) Type() string {
- return "YAML"
-}
-
-// Init prepares the metadata parser for parsing.
-func (y *YAMLParser) Init(b *bytes.Buffer) bool {
- meta, data := splitBuffer(b, "---")
- if meta == nil || data == nil {
- return false
- }
- y.markdown = data
-
- m := make(map[string]interface{})
- if err := yaml.Unmarshal(meta.Bytes(), &m); err != nil {
- return false
- }
- y.metadata = NewMetadata(m)
-
- return true
-}
-
-// Metadata returns parsed metadata. It should be called
-// only after a call to Parse returns without error.
-func (y *YAMLParser) Metadata() Metadata {
- return y.metadata
-}
-
-// Markdown renders the text as a byte array
-func (y *YAMLParser) Markdown() []byte {
- return y.markdown.Bytes()
-}
diff --git a/caddyhttp/markdown/process.go b/caddyhttp/markdown/process.go
deleted file mode 100644
index 7d4f85ebcfd..00000000000
--- a/caddyhttp/markdown/process.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package markdown
-
-import (
- "io"
- "io/ioutil"
- "os"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/markdown/metadata"
- "github.com/mholt/caddy/caddyhttp/markdown/summary"
- "github.com/russross/blackfriday"
-)
-
-// FileInfo represents a file in a particular server context. It wraps the os.FileInfo struct.
-type FileInfo struct {
- os.FileInfo
- ctx httpserver.Context
-}
-
-var recognizedMetaTags = []string{
- "author",
- "copyright",
- "description",
- "subject",
-}
-
-// Summarize returns an abbreviated string representation of the markdown stored in this file.
-// wordcount is the number of words returned in the summary.
-func (f FileInfo) Summarize(wordcount int) (string, error) {
- fp, err := f.ctx.Root.Open(f.Name())
- if err != nil {
- return "", err
- }
- defer fp.Close()
-
- buf, err := ioutil.ReadAll(fp)
- if err != nil {
- return "", err
- }
-
- return string(summary.Markdown(buf, wordcount)), nil
-}
-
-// Markdown processes the contents of a page in b. It parses the metadata
-// (if any) and uses the template (if found).
-func (c *Config) Markdown(title string, r io.Reader, dirents []os.FileInfo, ctx httpserver.Context) ([]byte, error) {
- body, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- parser := metadata.GetParser(body)
- markdown := parser.Markdown()
- mdata := parser.Metadata()
-
- // process markdown
- extns := 0
- extns |= blackfriday.EXTENSION_TABLES
- extns |= blackfriday.EXTENSION_FENCED_CODE
- extns |= blackfriday.EXTENSION_STRIKETHROUGH
- extns |= blackfriday.EXTENSION_DEFINITION_LISTS
- html := blackfriday.Markdown(markdown, c.Renderer, extns)
-
- // set it as body for template
- mdata.Variables["body"] = string(html)
-
- // fixup title
- mdata.Variables["title"] = mdata.Title
- if mdata.Variables["title"] == "" {
- mdata.Variables["title"] = title
- }
-
- // move available and valid front matters to the meta values
- meta := make(map[string]string)
- for _, val := range recognizedMetaTags {
- if mVal, ok := mdata.Variables[val]; ok {
- meta[val] = mVal.(string)
- }
- }
-
- // massage possible files
- files := []FileInfo{}
- for _, ent := range dirents {
- file := FileInfo{
- FileInfo: ent,
- ctx: ctx,
- }
- files = append(files, file)
- }
-
- return execTemplate(c, mdata, meta, files, ctx)
-}
diff --git a/caddyhttp/markdown/process_test.go b/caddyhttp/markdown/process_test.go
deleted file mode 100644
index fbafaf989c3..00000000000
--- a/caddyhttp/markdown/process_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package markdown
-
-import (
- "os"
- "strings"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestConfig_Markdown(t *testing.T) {
- tests := []map[string]string{
- {"author": "authorVal"},
- {"copyright": "copyrightVal"},
- {"description": "descriptionVal"},
- {"subject": "subjectVal"},
- {"author": "authorVal", "copyright": "copyrightVal"},
- {"author": "authorVal", "copyright": "copyrightVal", "description": "descriptionVal"},
- {"author": "authorVal", "copyright": "copyrightVal", "description": "descriptionVal", "subject": "subjectVal"},
- }
-
- for i, meta := range tests {
- config := &Config{
- Template: GetDefaultTemplate(),
- }
-
- toml := "+++"
- for key, val := range meta {
- toml = toml + "\n" + key + "= \"" + val + "\""
- }
- toml = toml + "\n+++"
-
- res, _ := config.Markdown("Test title", strings.NewReader(toml), []os.FileInfo{}, httpserver.Context{})
- sRes := string(res)
-
- for key, val := range meta {
- c := strings.Contains(sRes, "")
- if !c {
- t.Error("Test case", i, "should contain meta", key, val)
- }
- }
- }
-}
diff --git a/caddyhttp/markdown/setup.go b/caddyhttp/markdown/setup.go
deleted file mode 100644
index 8792f85bc89..00000000000
--- a/caddyhttp/markdown/setup.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package markdown
-
-import (
- "net/http"
- "path/filepath"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/russross/blackfriday"
-)
-
-func init() {
- caddy.RegisterPlugin("markdown", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Markdown middleware instance.
-func setup(c *caddy.Controller) error {
- mdconfigs, err := markdownParse(c)
- if err != nil {
- return err
- }
-
- cfg := httpserver.GetConfig(c)
-
- md := Markdown{
- Root: cfg.Root,
- FileSys: http.Dir(cfg.Root),
- Configs: mdconfigs,
- }
-
- cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- md.Next = next
- return md
- })
-
- return nil
-}
-
-func markdownParse(c *caddy.Controller) ([]*Config, error) {
- var mdconfigs []*Config
-
- for c.Next() {
- md := &Config{
- Renderer: blackfriday.HtmlRenderer(0, "", ""),
- Extensions: make(map[string]struct{}),
- Template: GetDefaultTemplate(),
- IndexFiles: []string{},
- TemplateFiles: make(map[string]string),
- }
-
- // Get the path scope
- args := c.RemainingArgs()
- switch len(args) {
- case 0:
- md.PathScope = "/"
- case 1:
- md.PathScope = args[0]
- default:
- return mdconfigs, c.ArgErr()
- }
-
- // Load any other configuration parameters
- for c.NextBlock() {
- if err := loadParams(c, md); err != nil {
- return mdconfigs, err
- }
- }
-
- // If no extensions were specified, assume some defaults
- if len(md.Extensions) == 0 {
- md.Extensions[".md"] = struct{}{}
- md.Extensions[".markdown"] = struct{}{}
- md.Extensions[".mdown"] = struct{}{}
- }
-
- // Make a list of index files to match extensions
- for ext := range md.Extensions {
- md.IndexFiles = append(md.IndexFiles, "index"+ext)
- }
- mdconfigs = append(mdconfigs, md)
- }
-
- return mdconfigs, nil
-}
-
-func loadParams(c *caddy.Controller, mdc *Config) error {
- cfg := httpserver.GetConfig(c)
-
- switch c.Val() {
- case "ext":
- for _, ext := range c.RemainingArgs() {
- mdc.Extensions[ext] = struct{}{}
- }
- return nil
- case "css":
- if !c.NextArg() {
- return c.ArgErr()
- }
- mdc.Styles = append(mdc.Styles, c.Val())
- return nil
- case "js":
- if !c.NextArg() {
- return c.ArgErr()
- }
- mdc.Scripts = append(mdc.Scripts, c.Val())
- return nil
- case "template":
- tArgs := c.RemainingArgs()
- switch len(tArgs) {
- default:
- return c.ArgErr()
- case 1:
- fpath := filepath.ToSlash(filepath.Clean(cfg.Root + string(filepath.Separator) + tArgs[0]))
-
- if err := SetTemplate(mdc.Template, "", fpath); err != nil {
- return c.Errf("default template parse error: %v", err)
- }
-
- mdc.TemplateFiles[""] = fpath
- return nil
- case 2:
- fpath := filepath.ToSlash(filepath.Clean(cfg.Root + string(filepath.Separator) + tArgs[1]))
-
- if err := SetTemplate(mdc.Template, tArgs[0], fpath); err != nil {
- return c.Errf("template parse error: %v", err)
- }
-
- mdc.TemplateFiles[tArgs[0]] = fpath
- return nil
- }
- case "templatedir":
- if !c.NextArg() {
- return c.ArgErr()
- }
-
- pattern := c.Val()
- _, err := mdc.Template.ParseGlob(pattern)
- if err != nil {
- return c.Errf("template load error: %v", err)
- }
- if c.NextArg() {
- return c.ArgErr()
- }
-
- paths, err := filepath.Glob(pattern)
- if err != nil {
- return c.Errf("glob %q failed: %v", pattern, err)
- }
- for _, path := range paths {
- mdc.TemplateFiles[filepath.Base(path)] = path
- }
- return nil
- default:
- return c.Err("Expected valid markdown configuration property")
- }
-}
diff --git a/caddyhttp/markdown/setup_test.go b/caddyhttp/markdown/setup_test.go
deleted file mode 100644
index 596ad84d2ec..00000000000
--- a/caddyhttp/markdown/setup_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package markdown
-
-import (
- "bytes"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "text/template"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `markdown /blog`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Markdown)
-
- if !ok {
- t.Fatalf("Expected handler to be type Markdown, got: %#v", handler)
- }
-
- if myHandler.Configs[0].PathScope != "/blog" {
- t.Errorf("Expected /blog as the Path Scope")
- }
- if len(myHandler.Configs[0].Extensions) != 3 {
- t.Error("Expected 3 markdown extensions")
- }
- for _, key := range []string{".md", ".markdown", ".mdown"} {
- if ext, ok := myHandler.Configs[0].Extensions[key]; !ok {
- t.Errorf("Expected extensions to contain %v", ext)
- }
- }
-}
-
-func TestMarkdownParse(t *testing.T) {
- tests := []struct {
- inputMarkdownConfig string
- shouldErr bool
- expectedMarkdownConfig []Config
- }{
-
- {`markdown /blog {
- ext .md .txt
- css /resources/css/blog.css
- js /resources/js/blog.js
-}`, false, []Config{{
- PathScope: "/blog",
- Extensions: map[string]struct{}{
- ".md": {},
- ".txt": {},
- },
- Styles: []string{"/resources/css/blog.css"},
- Scripts: []string{"/resources/js/blog.js"},
- Template: GetDefaultTemplate(),
- TemplateFiles: make(map[string]string),
- }}},
- {`markdown /blog {
- ext .md
- template tpl_with_include.html
-}`, false, []Config{{
- PathScope: "/blog",
- Extensions: map[string]struct{}{
- ".md": {},
- },
- Template: setDefaultTemplate("./testdata/tpl_with_include.html"),
- TemplateFiles: map[string]string{
- "": "testdata/tpl_with_include.html",
- },
- }}},
- }
-
- for i, test := range tests {
- c := caddy.NewTestController("http", test.inputMarkdownConfig)
- httpserver.GetConfig(c).Root = "./testdata"
- actualMarkdownConfigs, err := markdownParse(c)
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- }
- if len(actualMarkdownConfigs) != len(test.expectedMarkdownConfig) {
- t.Fatalf("Test %d expected %d no of WebSocket configs, but got %d ",
- i, len(test.expectedMarkdownConfig), len(actualMarkdownConfigs))
- }
- for j, actualMarkdownConfig := range actualMarkdownConfigs {
-
- if actualMarkdownConfig.PathScope != test.expectedMarkdownConfig[j].PathScope {
- t.Errorf("Test %d expected %dth Markdown PathScope to be %s , but got %s",
- i, j, test.expectedMarkdownConfig[j].PathScope, actualMarkdownConfig.PathScope)
- }
-
- if fmt.Sprint(actualMarkdownConfig.Styles) != fmt.Sprint(test.expectedMarkdownConfig[j].Styles) {
- t.Errorf("Test %d expected %dth Markdown Config Styles to be %s , but got %s",
- i, j, fmt.Sprint(test.expectedMarkdownConfig[j].Styles), fmt.Sprint(actualMarkdownConfig.Styles))
- }
- if fmt.Sprint(actualMarkdownConfig.Scripts) != fmt.Sprint(test.expectedMarkdownConfig[j].Scripts) {
- t.Errorf("Test %d expected %dth Markdown Config Scripts to be %s , but got %s",
- i, j, fmt.Sprint(test.expectedMarkdownConfig[j].Scripts), fmt.Sprint(actualMarkdownConfig.Scripts))
- }
- if ok, tx, ty := equalTemplates(actualMarkdownConfig.Template, test.expectedMarkdownConfig[j].Template); !ok {
- t.Errorf("Test %d the %dth Markdown Config Templates did not match, expected %s to be %s", i, j, tx, ty)
- }
- if expect, got := test.expectedMarkdownConfig[j].TemplateFiles, actualMarkdownConfig.TemplateFiles; !reflect.DeepEqual(expect, got) {
- t.Errorf("Test %d the %d Markdown config TemplateFiles did not match, expect %v, but got %v", i, j, expect, got)
- }
-
- }
- }
-}
-
-func equalTemplates(i, j *template.Template) (bool, string, string) {
- // Just in case :)
- if i == j {
- return true, "", ""
- }
-
- // We can't do much here, templates can't really be compared. However,
- // we can execute the templates and compare their outputs to be reasonably
- // sure that they're the same.
-
- // This is exceedingly ugly.
- ctx := httpserver.Context{
- Root: http.Dir("./testdata"),
- }
-
- md := Data{
- Context: ctx,
- Doc: make(map[string]interface{}),
- Styles: []string{"style1"},
- Scripts: []string{"js1"},
- }
- md.Doc["title"] = "some title"
- md.Doc["body"] = "some body"
-
- bufi := new(bytes.Buffer)
- bufj := new(bytes.Buffer)
-
- if err := i.Execute(bufi, md); err != nil {
- return false, fmt.Sprintf("%v", err), ""
- }
- if err := j.Execute(bufj, md); err != nil {
- return false, "", fmt.Sprintf("%v", err)
- }
-
- return bytes.Equal(bufi.Bytes(), bufj.Bytes()), string(bufi.Bytes()), string(bufj.Bytes())
-}
diff --git a/caddyhttp/markdown/summary/render.go b/caddyhttp/markdown/summary/render.go
deleted file mode 100644
index b23affbd189..00000000000
--- a/caddyhttp/markdown/summary/render.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package summary
-
-import (
- "bytes"
-
- "github.com/russross/blackfriday"
-)
-
-// Ensure we implement the Blackfriday Markdown Renderer interface
-var _ blackfriday.Renderer = (*renderer)(nil)
-
-// renderer renders Markdown to plain-text meant for listings and excerpts,
-// and implements the blackfriday.Renderer interface.
-//
-// Many of the methods are stubs with no output to prevent output of HTML markup.
-type renderer struct{}
-
-// Blocklevel callbacks
-
-// BlockCode is the code tag callback.
-func (r renderer) BlockCode(out *bytes.Buffer, text []byte, land string) {}
-
-// BlockQuote is the quote tag callback.
-func (r renderer) BlockQuote(out *bytes.Buffer, text []byte) {}
-
-// BlockHtml is the HTML tag callback.
-func (r renderer) BlockHtml(out *bytes.Buffer, text []byte) {}
-
-// Header is the header tag callback.
-func (r renderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {}
-
-// HRule is the horizontal rule tag callback.
-func (r renderer) HRule(out *bytes.Buffer) {}
-
-// List is the list tag callback.
-func (r renderer) List(out *bytes.Buffer, text func() bool, flags int) {
- // TODO: This is not desired (we'd rather not write lists as part of summary),
- // but see this issue: https://github.com/russross/blackfriday/issues/189
- marker := out.Len()
- if !text() {
- out.Truncate(marker)
- }
- out.Write([]byte{' '})
-}
-
-// ListItem is the list item tag callback.
-func (r renderer) ListItem(out *bytes.Buffer, text []byte, flags int) {}
-
-// Paragraph is the paragraph tag callback. This renders simple paragraph text
-// into plain text, such that summaries can be easily generated.
-func (r renderer) Paragraph(out *bytes.Buffer, text func() bool) {
- marker := out.Len()
- if !text() {
- out.Truncate(marker)
- }
- out.Write([]byte{' '})
-}
-
-// Table is the table tag callback.
-func (r renderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {}
-
-// TableRow is the table row tag callback.
-func (r renderer) TableRow(out *bytes.Buffer, text []byte) {}
-
-// TableHeaderCell is the table header cell tag callback.
-func (r renderer) TableHeaderCell(out *bytes.Buffer, text []byte, flags int) {}
-
-// TableCell is the table cell tag callback.
-func (r renderer) TableCell(out *bytes.Buffer, text []byte, flags int) {}
-
-// Footnotes is the foot notes tag callback.
-func (r renderer) Footnotes(out *bytes.Buffer, text func() bool) {}
-
-// FootnoteItem is the footnote item tag callback.
-func (r renderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {}
-
-// TitleBlock is the title tag callback.
-func (r renderer) TitleBlock(out *bytes.Buffer, text []byte) {}
-
-// Spanlevel callbacks
-
-// AutoLink is the autolink tag callback.
-func (r renderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {}
-
-// CodeSpan is the code span tag callback. Outputs a simple Markdown version
-// of the code span.
-func (r renderer) CodeSpan(out *bytes.Buffer, text []byte) {
- out.Write([]byte("`"))
- out.Write(text)
- out.Write([]byte("`"))
-}
-
-// DoubleEmphasis is the double emphasis tag callback. Outputs a simple
-// plain-text version of the input.
-func (r renderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
- out.Write(text)
-}
-
-// Emphasis is the emphasis tag callback. Outputs a simple plain-text
-// version of the input.
-func (r renderer) Emphasis(out *bytes.Buffer, text []byte) {
- out.Write(text)
-}
-
-// Image is the image tag callback.
-func (r renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {}
-
-// LineBreak is the line break tag callback.
-func (r renderer) LineBreak(out *bytes.Buffer) {}
-
-// Link is the link tag callback. Outputs a sipmle plain-text version
-// of the input.
-func (r renderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
- out.Write(content)
-}
-
-// RawHtmlTag is the raw HTML tag callback.
-func (r renderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {}
-
-// TripleEmphasis is the triple emphasis tag callback. Outputs a simple plain-text
-// version of the input.
-func (r renderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
- out.Write(text)
-}
-
-// StrikeThrough is the strikethrough tag callback.
-func (r renderer) StrikeThrough(out *bytes.Buffer, text []byte) {}
-
-// FootnoteRef is the footnote ref tag callback.
-func (r renderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {}
-
-// Lowlevel callbacks
-
-// Entity callback. Outputs a simple plain-text version of the input.
-func (r renderer) Entity(out *bytes.Buffer, entity []byte) {
- out.Write(entity)
-}
-
-// NormalText callback. Outputs a simple plain-text version of the input.
-func (r renderer) NormalText(out *bytes.Buffer, text []byte) {
- out.Write(text)
-}
-
-// Header and footer
-
-// DocumentHeader callback.
-func (r renderer) DocumentHeader(out *bytes.Buffer) {}
-
-// DocumentFooter callback.
-func (r renderer) DocumentFooter(out *bytes.Buffer) {}
-
-// GetFlags returns zero.
-func (r renderer) GetFlags() int { return 0 }
diff --git a/caddyhttp/markdown/summary/summary.go b/caddyhttp/markdown/summary/summary.go
deleted file mode 100644
index e55bba2c9b0..00000000000
--- a/caddyhttp/markdown/summary/summary.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package summary
-
-import (
- "bytes"
-
- "github.com/russross/blackfriday"
-)
-
-// Markdown formats input using a plain-text renderer, and
-// then returns up to the first `wordcount` words as a summary.
-func Markdown(input []byte, wordcount int) []byte {
- words := bytes.Fields(blackfriday.Markdown(input, renderer{}, 0))
- if wordcount > len(words) {
- wordcount = len(words)
- }
- return bytes.Join(words[0:wordcount], []byte{' '})
-}
diff --git a/caddyhttp/markdown/summary/summary_test.go b/caddyhttp/markdown/summary/summary_test.go
deleted file mode 100644
index f87c00db00c..00000000000
--- a/caddyhttp/markdown/summary/summary_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package summary
-
-import "testing"
-
-func TestMarkdown(t *testing.T) {
- input := []byte(`Testing with just a few words.`)
- got := string(Markdown(input, 3))
- if want := "Testing with just"; want != got {
- t.Errorf("Expected '%s' but got '%s'", want, got)
- }
-}
diff --git a/caddyhttp/markdown/template.go b/caddyhttp/markdown/template.go
deleted file mode 100644
index b21e598499a..00000000000
--- a/caddyhttp/markdown/template.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package markdown
-
-import (
- "bytes"
- "io/ioutil"
- "text/template"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/markdown/metadata"
-)
-
-// Data represents a markdown document.
-type Data struct {
- httpserver.Context
- Doc map[string]interface{}
- Styles []string
- Scripts []string
- Meta map[string]string
- Files []FileInfo
-}
-
-// Include "overrides" the embedded httpserver.Context's Include()
-// method so that included files have access to d's fields.
-// Note: using {{template 'template-name' .}} instead might be better.
-func (d Data) Include(filename string, args ...interface{}) (string, error) {
- d.Args = args
- return httpserver.ContextInclude(filename, d, d.Root)
-}
-
-// execTemplate executes a template given a requestPath, template, and metadata
-func execTemplate(c *Config, mdata metadata.Metadata, meta map[string]string, files []FileInfo, ctx httpserver.Context) ([]byte, error) {
- mdData := Data{
- Context: ctx,
- Doc: mdata.Variables,
- Styles: c.Styles,
- Scripts: c.Scripts,
- Meta: meta,
- Files: files,
- }
-
- templateName := mdata.Template
- // reload template on every request for now
- // TODO: cache templates by a general plugin
- if templateFile, ok := c.TemplateFiles[templateName]; ok {
- err := SetTemplate(c.Template, templateName, templateFile)
- if err != nil {
- return nil, err
- }
- }
-
- b := new(bytes.Buffer)
- if err := c.Template.ExecuteTemplate(b, templateName, mdData); err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
-// SetTemplate reads in the template with the filename provided. If the file does not exist or is not parsable, it will return an error.
-func SetTemplate(t *template.Template, name, filename string) error {
-
- // Read template
- buf, err := ioutil.ReadFile(filename)
- if err != nil {
- return err
- }
-
- // Update if exists
- if tt := t.Lookup(name); tt != nil {
- _, err = tt.Parse(string(buf))
- return err
- }
-
- // Allocate new name if not
- _, err = t.New(name).Parse(string(buf))
- return err
-}
-
-// GetDefaultTemplate returns the default template.
-func GetDefaultTemplate() *template.Template {
- return template.Must(template.New("").Parse(defaultTemplate))
-}
-
-const (
- defaultTemplate = `
-
-
- {{.Doc.title}}
-
- {{range $key, $val := .Meta}}
-
- {{end}}
- {{- range .Styles}}
-
- {{- end}}
- {{- range .Scripts}}
-
- {{- end}}
-
-
- {{.Doc.body}}
-
-`
-)
diff --git a/caddyhttp/markdown/testdata/blog/test.md b/caddyhttp/markdown/testdata/blog/test.md
deleted file mode 100644
index 93f07a49348..00000000000
--- a/caddyhttp/markdown/testdata/blog/test.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Markdown test 1
-sitename: A Caddy website
----
-
-## Welcome on the blog
-
-Body
-
-``` go
-func getTrue() bool {
- return true
-}
-```
diff --git a/caddyhttp/markdown/testdata/docflags/template.txt b/caddyhttp/markdown/testdata/docflags/template.txt
deleted file mode 100644
index 2b001388f15..00000000000
--- a/caddyhttp/markdown/testdata/docflags/template.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Doc.var_string {{.Doc.var_string}}
-Doc.var_bool {{.Doc.var_bool}}
diff --git a/caddyhttp/markdown/testdata/docflags/test.md b/caddyhttp/markdown/testdata/docflags/test.md
deleted file mode 100644
index 64ca7f78d5e..00000000000
--- a/caddyhttp/markdown/testdata/docflags/test.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-var_string: hello
-var_bool: true
----
diff --git a/caddyhttp/markdown/testdata/header.html b/caddyhttp/markdown/testdata/header.html
deleted file mode 100644
index cfbdc75b50a..00000000000
--- a/caddyhttp/markdown/testdata/header.html
+++ /dev/null
@@ -1 +0,0 @@
-
Header for: {{.Doc.title}}
\ No newline at end of file
diff --git a/caddyhttp/markdown/testdata/log/test.md b/caddyhttp/markdown/testdata/log/test.md
deleted file mode 100644
index 476ab3015c3..00000000000
--- a/caddyhttp/markdown/testdata/log/test.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Markdown test 2
-sitename: A Caddy website
----
-
-## Welcome on the blog
-
-Body
-
-``` go
-func getTrue() bool {
- return true
-}
-```
diff --git a/caddyhttp/markdown/testdata/markdown_tpl.html b/caddyhttp/markdown/testdata/markdown_tpl.html
deleted file mode 100644
index 7c697850001..00000000000
--- a/caddyhttp/markdown/testdata/markdown_tpl.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-{{.Doc.title}}
-
-
-{{.Include "header.html"}}
-Welcome to {{.Doc.sitename}}!
-{{.Doc.body}}
-
-
diff --git a/caddyhttp/markdown/testdata/og/first.md b/caddyhttp/markdown/testdata/og/first.md
deleted file mode 100644
index 4d7a4251f6e..00000000000
--- a/caddyhttp/markdown/testdata/og/first.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: first_post
-sitename: title
----
-# Test h1
diff --git a/caddyhttp/markdown/testdata/tpl_with_include.html b/caddyhttp/markdown/testdata/tpl_with_include.html
deleted file mode 100644
index 68cc986cf45..00000000000
--- a/caddyhttp/markdown/testdata/tpl_with_include.html
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
- {{.Doc.title}}
-
-
- Welcome to {{.Doc.sitename}}!
-
- {{.Doc.body}}
-
-
diff --git a/caddyhttp/mime/mime.go b/caddyhttp/mime/mime.go
deleted file mode 100644
index b215fc8a061..00000000000
--- a/caddyhttp/mime/mime.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package mime
-
-import (
- "net/http"
- "path"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Config represent a mime config. Map from extension to mime-type.
-// Note, this should be safe with concurrent read access, as this is
-// not modified concurrently.
-type Config map[string]string
-
-// Mime sets Content-Type header of requests based on configurations.
-type Mime struct {
- Next httpserver.Handler
- Configs Config
-}
-
-// ServeHTTP implements the httpserver.Handler interface.
-func (e Mime) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- // Get a clean /-path, grab the extension
- ext := path.Ext(path.Clean(r.URL.Path))
-
- if contentType, ok := e.Configs[ext]; ok {
- w.Header().Set("Content-Type", contentType)
- }
-
- return e.Next.ServeHTTP(w, r)
-}
diff --git a/caddyhttp/mime/mime_test.go b/caddyhttp/mime/mime_test.go
deleted file mode 100644
index f3e896dc6ed..00000000000
--- a/caddyhttp/mime/mime_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package mime
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestMimeHandler(t *testing.T) {
- mimes := Config{
- ".html": "text/html",
- ".txt": "text/plain",
- ".swf": "application/x-shockwave-flash",
- }
-
- m := Mime{Configs: mimes}
-
- w := httptest.NewRecorder()
- exts := []string{
- ".html", ".txt", ".swf",
- }
- for _, e := range exts {
- url := "/file" + e
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- t.Error(err)
- }
- m.Next = nextFunc(true, mimes[e])
- _, err = m.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
-
- w = httptest.NewRecorder()
- exts = []string{
- ".htm1", ".abc", ".mdx",
- }
- for _, e := range exts {
- url := "/file" + e
- r, err := http.NewRequest("GET", url, nil)
- if err != nil {
- t.Error(err)
- }
- m.Next = nextFunc(false, "")
- _, err = m.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }
-}
-
-func nextFunc(shouldMime bool, contentType string) httpserver.Handler {
- return httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- if shouldMime {
- if w.Header().Get("Content-Type") != contentType {
- return 0, fmt.Errorf("expected Content-Type: %v, found %v", contentType, w.Header().Get("Content-Type"))
- }
- return 0, nil
- }
- if w.Header().Get("Content-Type") != "" {
- return 0, fmt.Errorf("Content-Type header not expected")
- }
- return 0, nil
- })
-}
diff --git a/caddyhttp/mime/setup.go b/caddyhttp/mime/setup.go
deleted file mode 100644
index bca6224363c..00000000000
--- a/caddyhttp/mime/setup.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package mime
-
-import (
- "fmt"
- "strings"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("mime", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new mime middleware instance.
-func setup(c *caddy.Controller) error {
- configs, err := mimeParse(c)
- if err != nil {
- return err
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Mime{Next: next, Configs: configs}
- })
-
- return nil
-}
-
-func mimeParse(c *caddy.Controller) (Config, error) {
- configs := Config{}
-
- for c.Next() {
- // At least one extension is required
-
- args := c.RemainingArgs()
- switch len(args) {
- case 2:
- if err := validateExt(configs, args[0]); err != nil {
- return configs, err
- }
- configs[args[0]] = args[1]
- case 1:
- return configs, c.ArgErr()
- case 0:
- for c.NextBlock() {
- ext := c.Val()
- if err := validateExt(configs, ext); err != nil {
- return configs, err
- }
- if !c.NextArg() {
- return configs, c.ArgErr()
- }
- configs[ext] = c.Val()
- }
- }
-
- }
-
- return configs, nil
-}
-
-// validateExt checks for valid file name extension.
-func validateExt(configs Config, ext string) error {
- if !strings.HasPrefix(ext, ".") {
- return fmt.Errorf(`mime: invalid extension "%v" (must start with dot)`, ext)
- }
- if _, ok := configs[ext]; ok {
- return fmt.Errorf(`mime: duplicate extension "%v" found`, ext)
- }
- return nil
-}
diff --git a/caddyhttp/mime/setup_test.go b/caddyhttp/mime/setup_test.go
deleted file mode 100644
index 571acf17efe..00000000000
--- a/caddyhttp/mime/setup_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package mime
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `mime .txt text/plain`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, but had 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Mime)
- if !ok {
- t.Fatalf("Expected handler to be type Mime, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-
- tests := []struct {
- input string
- shouldErr bool
- }{
- {`mime {`, true},
- {`mime {}`, true},
- {`mime a b`, true},
- {`mime a {`, true},
- {`mime { txt f } `, true},
- {`mime { html } `, true},
- {`mime {
- .html text/html
- .txt text/plain
- } `, false},
- {`mime {
- .foo text/foo
- .bar text/bar
- .foo text/foobar
- } `, true},
- {`mime { .html text/html } `, false},
- {`mime { .html
- } `, true},
- {`mime .txt text/plain`, false},
- }
- for i, test := range tests {
- m, err := mimeParse(caddy.NewTestController("http", test.input))
- if test.shouldErr && err == nil {
- t.Errorf("Test %v: Expected error but found nil %v", i, m)
- } else if !test.shouldErr && err != nil {
- t.Errorf("Test %v: Expected no error but found error: %v", i, err)
- }
- }
-}
diff --git a/caddyhttp/pprof/pprof.go b/caddyhttp/pprof/pprof.go
deleted file mode 100644
index 3a0dbd93c1f..00000000000
--- a/caddyhttp/pprof/pprof.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package pprof
-
-import (
- "net/http"
- pp "net/http/pprof"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// BasePath is the base path to match for all pprof requests.
-const BasePath = "/debug/pprof"
-
-// Handler is a simple struct whose ServeHTTP will delegate pprof
-// endpoints to their equivalent net/http/pprof handlers.
-type Handler struct {
- Next httpserver.Handler
- Mux *http.ServeMux
-}
-
-// ServeHTTP handles requests to BasePath with pprof, or passes
-// all other requests up the chain.
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- if httpserver.Path(r.URL.Path).Matches(BasePath) {
- h.Mux.ServeHTTP(w, r)
- return 0, nil
- }
- return h.Next.ServeHTTP(w, r)
-}
-
-// NewMux returns a new http.ServeMux that routes pprof requests.
-// It pretty much copies what the std lib pprof does on init:
-// https://golang.org/src/net/http/pprof/pprof.go#L67
-func NewMux() *http.ServeMux {
- mux := http.NewServeMux()
- mux.HandleFunc(BasePath+"/", func(w http.ResponseWriter, r *http.Request) {
- // this endpoint, as implemented in the standard library, doesn't set
- // its Content-Type header, so using this can confuse clients, especially
- // if gzipping...
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- pp.Index(w, r)
- })
- mux.HandleFunc(BasePath+"/cmdline", pp.Cmdline)
- mux.HandleFunc(BasePath+"/profile", pp.Profile)
- mux.HandleFunc(BasePath+"/symbol", pp.Symbol)
- mux.HandleFunc(BasePath+"/trace", pp.Trace)
- return mux
-}
diff --git a/caddyhttp/pprof/pprof_test.go b/caddyhttp/pprof/pprof_test.go
deleted file mode 100644
index 2b870eb8256..00000000000
--- a/caddyhttp/pprof/pprof_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package pprof
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestServeHTTP(t *testing.T) {
- h := Handler{
- Next: httpserver.HandlerFunc(nextHandler),
- Mux: NewMux(),
- }
-
- w := httptest.NewRecorder()
- r, err := http.NewRequest("GET", "/debug/pprof", nil)
- if err != nil {
- t.Fatal(err)
- }
- status, err := h.ServeHTTP(w, r)
-
- if status != 0 {
- t.Errorf("Expected status %d but got %d", 0, status)
- }
- if err != nil {
- t.Errorf("Expected nil error, but got: %v", err)
- }
- if w.Body.String() == "content" {
- t.Errorf("Expected pprof to handle request, but it didn't")
- }
-
- w = httptest.NewRecorder()
- r, err = http.NewRequest("GET", "/foo", nil)
- if err != nil {
- t.Fatal(err)
- }
- status, err = h.ServeHTTP(w, r)
- if status != http.StatusNotFound {
- t.Errorf("Test two: Expected status %d but got %d", http.StatusNotFound, status)
- }
- if err != nil {
- t.Errorf("Test two: Expected nil error, but got: %v", err)
- }
- if w.Body.String() != "content" {
- t.Errorf("Expected pprof to pass the request through, but it didn't; got: %s", w.Body.String())
- }
-}
-
-func nextHandler(w http.ResponseWriter, r *http.Request) (int, error) {
- fmt.Fprintf(w, "content")
- return http.StatusNotFound, nil
-}
diff --git a/caddyhttp/pprof/setup.go b/caddyhttp/pprof/setup.go
deleted file mode 100644
index 638d5e126e5..00000000000
--- a/caddyhttp/pprof/setup.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package pprof
-
-import (
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("pprof", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup returns a new instance of a pprof handler. It accepts no arguments or options.
-func setup(c *caddy.Controller) error {
- found := false
-
- for c.Next() {
- if found {
- return c.Err("pprof can only be specified once")
- }
- if len(c.RemainingArgs()) != 0 {
- return c.ArgErr()
- }
- if c.NextBlock() {
- return c.ArgErr()
- }
- found = true
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return &Handler{Next: next, Mux: NewMux()}
- })
-
- return nil
-}
diff --git a/caddyhttp/pprof/setup_test.go b/caddyhttp/pprof/setup_test.go
deleted file mode 100644
index f51303ec8a9..00000000000
--- a/caddyhttp/pprof/setup_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package pprof
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
-)
-
-func TestSetup(t *testing.T) {
- tests := []struct {
- input string
- shouldErr bool
- }{
- {`pprof`, false},
- {`pprof {}`, true},
- {`pprof /foo`, true},
- {`pprof {
- a b
- }`, true},
- {`pprof
- pprof`, true},
- }
- for i, test := range tests {
- c := caddy.NewTestController("http", test.input)
- err := setup(c)
- if test.shouldErr && err == nil {
- t.Errorf("Test %v: Expected error but found nil", i)
- } else if !test.shouldErr && err != nil {
- t.Errorf("Test %v: Expected no error but found error: %v", i, err)
- }
- }
-}
diff --git a/caddyhttp/proxy/body.go b/caddyhttp/proxy/body.go
deleted file mode 100644
index 38d0016596b..00000000000
--- a/caddyhttp/proxy/body.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package proxy
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-type bufferedBody struct {
- *bytes.Reader
-}
-
-func (*bufferedBody) Close() error {
- return nil
-}
-
-// rewind allows bufferedBody to be read again.
-func (b *bufferedBody) rewind() error {
- if b == nil {
- return nil
- }
- _, err := b.Seek(0, io.SeekStart)
- return err
-}
-
-// newBufferedBody returns *bufferedBody to use in place of src. Closes src
-// and returns Read error on src. All content from src is buffered.
-func newBufferedBody(src io.ReadCloser) (*bufferedBody, error) {
- if src == nil {
- return nil, nil
- }
- b, err := ioutil.ReadAll(src)
- src.Close()
- if err != nil {
- return nil, err
- }
- return &bufferedBody{
- Reader: bytes.NewReader(b),
- }, nil
-}
diff --git a/caddyhttp/proxy/body_test.go b/caddyhttp/proxy/body_test.go
deleted file mode 100644
index 5b72784cf29..00000000000
--- a/caddyhttp/proxy/body_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package proxy
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func TestBodyRetry(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.Copy(w, r.Body)
- r.Body.Close()
- }))
- defer ts.Close()
-
- testcase := "test content"
- req, err := http.NewRequest(http.MethodPost, ts.URL, bytes.NewBufferString(testcase))
- if err != nil {
- t.Fatal(err)
- }
-
- body, err := newBufferedBody(req.Body)
- if err != nil {
- t.Fatal(err)
- }
- if body != nil {
- req.Body = body
- }
-
- // simulate fail request
- host := req.URL.Host
- req.URL.Host = "example.com"
- body.rewind()
- _, _ = http.DefaultTransport.RoundTrip(req)
-
- // retry request
- req.URL.Host = host
- body.rewind()
- resp, err := http.DefaultTransport.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- result, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- resp.Body.Close()
- if string(result) != testcase {
- t.Fatalf("result = %s, want %s", result, testcase)
- }
-
- // try one more time for body reuse
- body.rewind()
- resp, err = http.DefaultTransport.RoundTrip(req)
- if err != nil {
- t.Fatal(err)
- }
- result, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- resp.Body.Close()
- if string(result) != testcase {
- t.Fatalf("result = %s, want %s", result, testcase)
- }
-}
diff --git a/caddyhttp/proxy/policy.go b/caddyhttp/proxy/policy.go
deleted file mode 100644
index f95c5b8f198..00000000000
--- a/caddyhttp/proxy/policy.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package proxy
-
-import (
- "hash/fnv"
- "math"
- "math/rand"
- "net"
- "net/http"
- "sync"
-)
-
-// HostPool is a collection of UpstreamHosts.
-type HostPool []*UpstreamHost
-
-// Policy decides how a host will be selected from a pool.
-type Policy interface {
- Select(pool HostPool, r *http.Request) *UpstreamHost
-}
-
-func init() {
- RegisterPolicy("random", func(arg string) Policy { return &Random{} })
- RegisterPolicy("least_conn", func(arg string) Policy { return &LeastConn{} })
- RegisterPolicy("round_robin", func(arg string) Policy { return &RoundRobin{} })
- RegisterPolicy("ip_hash", func(arg string) Policy { return &IPHash{} })
- RegisterPolicy("first", func(arg string) Policy { return &First{} })
- RegisterPolicy("uri_hash", func(arg string) Policy { return &URIHash{} })
- RegisterPolicy("header", func(arg string) Policy { return &Header{arg} })
-}
-
-// Random is a policy that selects up hosts from a pool at random.
-type Random struct{}
-
-// Select selects an up host at random from the specified pool.
-func (r *Random) Select(pool HostPool, request *http.Request) *UpstreamHost {
-
- // Because the number of available hosts isn't known
- // up front, the host is selected via reservoir sampling
- // https://en.wikipedia.org/wiki/Reservoir_sampling
- var randHost *UpstreamHost
- count := 0
- for _, host := range pool {
- if !host.Available() {
- continue
- }
-
- // (n % 1 == 0) holds for all n, therefore randHost
- // will always get assigned a value if there is
- // at least 1 available host
- count++
- if (rand.Int() % count) == 0 {
- randHost = host
- }
- }
- return randHost
-}
-
-// LeastConn is a policy that selects the host with the least connections.
-type LeastConn struct{}
-
-// Select selects the up host with the least number of connections in the
-// pool. If more than one host has the same least number of connections,
-// one of the hosts is chosen at random.
-func (r *LeastConn) Select(pool HostPool, request *http.Request) *UpstreamHost {
- var bestHost *UpstreamHost
- count := 0
- leastConn := int64(math.MaxInt64)
- for _, host := range pool {
- if !host.Available() {
- continue
- }
-
- if host.Conns < leastConn {
- leastConn = host.Conns
- count = 0
- }
-
- // Among hosts with same least connections, perform a reservoir
- // sample: https://en.wikipedia.org/wiki/Reservoir_sampling
- if host.Conns == leastConn {
- count++
- if (rand.Int() % count) == 0 {
- bestHost = host
- }
- }
- }
- return bestHost
-}
-
-// RoundRobin is a policy that selects hosts based on round-robin ordering.
-type RoundRobin struct {
- robin uint32
- mutex sync.Mutex
-}
-
-// Select selects an up host from the pool using a round-robin ordering scheme.
-func (r *RoundRobin) Select(pool HostPool, request *http.Request) *UpstreamHost {
- poolLen := uint32(len(pool))
- r.mutex.Lock()
- defer r.mutex.Unlock()
- // Return next available host
- for i := uint32(0); i < poolLen; i++ {
- r.robin++
- host := pool[r.robin%poolLen]
- if host.Available() {
- return host
- }
- }
- return nil
-}
-
-// hostByHashing returns an available host from pool based on a hashable string
-func hostByHashing(pool HostPool, s string) *UpstreamHost {
- poolLen := uint32(len(pool))
- index := hash(s) % poolLen
- for i := uint32(0); i < poolLen; i++ {
- index += i
- host := pool[index%poolLen]
- if host.Available() {
- return host
- }
- }
- return nil
-}
-
-// hash calculates a hash based on string s
-func hash(s string) uint32 {
- h := fnv.New32a()
- h.Write([]byte(s))
- return h.Sum32()
-}
-
-// IPHash is a policy that selects hosts based on hashing the request IP
-type IPHash struct{}
-
-// Select selects an up host from the pool based on hashing the request IP
-func (r *IPHash) Select(pool HostPool, request *http.Request) *UpstreamHost {
- clientIP, _, err := net.SplitHostPort(request.RemoteAddr)
- if err != nil {
- clientIP = request.RemoteAddr
- }
- return hostByHashing(pool, clientIP)
-}
-
-// URIHash is a policy that selects the host based on hashing the request URI
-type URIHash struct{}
-
-// Select selects the host based on hashing the URI
-func (r *URIHash) Select(pool HostPool, request *http.Request) *UpstreamHost {
- return hostByHashing(pool, request.RequestURI)
-}
-
-// First is a policy that selects the first available host
-type First struct{}
-
-// Select selects the first available host from the pool
-func (r *First) Select(pool HostPool, request *http.Request) *UpstreamHost {
- for _, host := range pool {
- if host.Available() {
- return host
- }
- }
- return nil
-}
-
-// Header is a policy that selects based on a hash of the given header
-type Header struct {
- // The name of the request header, the value of which will determine
- // how the request is routed
- Name string
-}
-
-// Select selects the host based on hashing the header value
-func (r *Header) Select(pool HostPool, request *http.Request) *UpstreamHost {
- if r.Name == "" {
- return nil
- }
- val := request.Header.Get(r.Name)
- if val == "" {
- return nil
- }
- return hostByHashing(pool, val)
-}
diff --git a/caddyhttp/proxy/policy_test.go b/caddyhttp/proxy/policy_test.go
deleted file mode 100644
index 6acf1e085dc..00000000000
--- a/caddyhttp/proxy/policy_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-package proxy
-
-import (
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
-)
-
-var workableServer *httptest.Server
-
-func TestMain(m *testing.M) {
- workableServer = httptest.NewServer(http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- // do nothing
- }))
- r := m.Run()
- workableServer.Close()
- os.Exit(r)
-}
-
-type customPolicy struct{}
-
-func (r *customPolicy) Select(pool HostPool, request *http.Request) *UpstreamHost {
- return pool[0]
-}
-
-func testPool() HostPool {
- pool := []*UpstreamHost{
- {
- Name: workableServer.URL, // this should resolve (healthcheck test)
- },
- {
- Name: "http://localhost:99998", // this shouldn't
- },
- {
- Name: "http://C",
- },
- }
- return HostPool(pool)
-}
-
-func TestRoundRobinPolicy(t *testing.T) {
- pool := testPool()
- rrPolicy := &RoundRobin{}
- request, _ := http.NewRequest("GET", "/", nil)
-
- h := rrPolicy.Select(pool, request)
- // First selected host is 1, because counter starts at 0
- // and increments before host is selected
- if h != pool[1] {
- t.Error("Expected first round robin host to be second host in the pool.")
- }
- h = rrPolicy.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected second round robin host to be third host in the pool.")
- }
- h = rrPolicy.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected third round robin host to be first host in the pool.")
- }
- // mark host as down
- pool[1].Unhealthy = 1
- h = rrPolicy.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected to skip down host.")
- }
- // mark host as up
- pool[1].Unhealthy = 0
-
- h = rrPolicy.Select(pool, request)
- if h == pool[2] {
- t.Error("Expected to balance evenly among healthy hosts")
- }
- // mark host as full
- pool[1].Conns = 1
- pool[1].MaxConns = 1
- h = rrPolicy.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected to skip full host.")
- }
-}
-
-func TestLeastConnPolicy(t *testing.T) {
- pool := testPool()
- lcPolicy := &LeastConn{}
- request, _ := http.NewRequest("GET", "/", nil)
-
- pool[0].Conns = 10
- pool[1].Conns = 10
- h := lcPolicy.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected least connection host to be third host.")
- }
- pool[2].Conns = 100
- h = lcPolicy.Select(pool, request)
- if h != pool[0] && h != pool[1] {
- t.Error("Expected least connection host to be first or second host.")
- }
-}
-
-func TestCustomPolicy(t *testing.T) {
- pool := testPool()
- customPolicy := &customPolicy{}
- request, _ := http.NewRequest("GET", "/", nil)
-
- h := customPolicy.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected custom policy host to be the first host.")
- }
-}
-
-func TestIPHashPolicy(t *testing.T) {
- pool := testPool()
- ipHash := &IPHash{}
- request, _ := http.NewRequest("GET", "/", nil)
- // We should be able to predict where every request is routed.
- request.RemoteAddr = "172.0.0.1:80"
- h := ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
- request.RemoteAddr = "172.0.0.2:80"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
- request.RemoteAddr = "172.0.0.3:80"
- h = ipHash.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected ip hash policy host to be the third host.")
- }
- request.RemoteAddr = "172.0.0.4:80"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
-
- // we should get the same results without a port
- request.RemoteAddr = "172.0.0.1"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
- request.RemoteAddr = "172.0.0.2"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
- request.RemoteAddr = "172.0.0.3"
- h = ipHash.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected ip hash policy host to be the third host.")
- }
- request.RemoteAddr = "172.0.0.4"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
-
- // we should get a healthy host if the original host is unhealthy and a
- // healthy host is available
- request.RemoteAddr = "172.0.0.1"
- pool[1].Unhealthy = 1
- h = ipHash.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected ip hash policy host to be the third host.")
- }
-
- request.RemoteAddr = "172.0.0.2"
- h = ipHash.Select(pool, request)
- if h != pool[2] {
- t.Error("Expected ip hash policy host to be the third host.")
- }
- pool[1].Unhealthy = 0
-
- request.RemoteAddr = "172.0.0.3"
- pool[2].Unhealthy = 1
- h = ipHash.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected ip hash policy host to be the first host.")
- }
- request.RemoteAddr = "172.0.0.4"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
-
- // We should be able to resize the host pool and still be able to predict
- // where a request will be routed with the same IP's used above
- pool = []*UpstreamHost{
- {
- Name: workableServer.URL, // this should resolve (healthcheck test)
- },
- {
- Name: "http://localhost:99998", // this shouldn't
- },
- }
- pool = HostPool(pool)
- request.RemoteAddr = "172.0.0.1:80"
- h = ipHash.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected ip hash policy host to be the first host.")
- }
- request.RemoteAddr = "172.0.0.2:80"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
- request.RemoteAddr = "172.0.0.3:80"
- h = ipHash.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected ip hash policy host to be the first host.")
- }
- request.RemoteAddr = "172.0.0.4:80"
- h = ipHash.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected ip hash policy host to be the second host.")
- }
-
- // We should get nil when there are no healthy hosts
- pool[0].Unhealthy = 1
- pool[1].Unhealthy = 1
- h = ipHash.Select(pool, request)
- if h != nil {
- t.Error("Expected ip hash policy host to be nil.")
- }
-}
-
-func TestFirstPolicy(t *testing.T) {
- pool := testPool()
- firstPolicy := &First{}
- req := httptest.NewRequest(http.MethodGet, "/", nil)
-
- h := firstPolicy.Select(pool, req)
- if h != pool[0] {
- t.Error("Expected first policy host to be the first host.")
- }
-
- pool[0].Unhealthy = 1
- h = firstPolicy.Select(pool, req)
- if h != pool[1] {
- t.Error("Expected first policy host to be the second host.")
- }
-}
-
-func TestUriPolicy(t *testing.T) {
- pool := testPool()
- uriPolicy := &URIHash{}
-
- request := httptest.NewRequest(http.MethodGet, "/test", nil)
- h := uriPolicy.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected uri policy host to be the first host.")
- }
-
- pool[0].Unhealthy = 1
- h = uriPolicy.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected uri policy host to be the first host.")
- }
-
- request = httptest.NewRequest(http.MethodGet, "/test_2", nil)
- h = uriPolicy.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected uri policy host to be the second host.")
- }
-
- // We should be able to resize the host pool and still be able to predict
- // where a request will be routed with the same URI's used above
- pool = []*UpstreamHost{
- {
- Name: workableServer.URL, // this should resolve (healthcheck test)
- },
- {
- Name: "http://localhost:99998", // this shouldn't
- },
- }
-
- request = httptest.NewRequest(http.MethodGet, "/test", nil)
- h = uriPolicy.Select(pool, request)
- if h != pool[0] {
- t.Error("Expected uri policy host to be the first host.")
- }
-
- pool[0].Unhealthy = 1
- h = uriPolicy.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected uri policy host to be the first host.")
- }
-
- request = httptest.NewRequest(http.MethodGet, "/test_2", nil)
- h = uriPolicy.Select(pool, request)
- if h != pool[1] {
- t.Error("Expected uri policy host to be the second host.")
- }
-
- pool[0].Unhealthy = 1
- pool[1].Unhealthy = 1
- h = uriPolicy.Select(pool, request)
- if h != nil {
- t.Error("Expected uri policy policy host to be nil.")
- }
-}
-
-func TestHeaderPolicy(t *testing.T) {
- pool := testPool()
- tests := []struct {
- Policy *Header
- RequestHeaderName string
- RequestHeaderValue string
- NilHost bool
- HostIndex int
- }{
- {&Header{""}, "", "", true, 0},
- {&Header{""}, "Affinity", "somevalue", true, 0},
- {&Header{""}, "Affinity", "", true, 0},
-
- {&Header{"Affinity"}, "", "", true, 0},
- {&Header{"Affinity"}, "Affinity", "somevalue", false, 1},
- {&Header{"Affinity"}, "Affinity", "somevalue2", false, 0},
- {&Header{"Affinity"}, "Affinity", "somevalue3", false, 2},
- {&Header{"Affinity"}, "Affinity", "", true, 0},
- }
-
- for idx, test := range tests {
- request, _ := http.NewRequest("GET", "/", nil)
- if test.RequestHeaderName != "" {
- request.Header.Add(test.RequestHeaderName, test.RequestHeaderValue)
- }
-
- host := test.Policy.Select(pool, request)
- if test.NilHost && host != nil {
- t.Errorf("%d: Expected host to be nil", idx)
- }
- if !test.NilHost && host == nil {
- t.Errorf("%d: Did not expect host to be nil", idx)
- }
- if !test.NilHost && host != pool[test.HostIndex] {
- t.Errorf("%d: Expected Header policy to be host %d", idx, test.HostIndex)
- }
- }
-}
diff --git a/caddyhttp/proxy/proxy.go b/caddyhttp/proxy/proxy.go
deleted file mode 100644
index 56159f9fb19..00000000000
--- a/caddyhttp/proxy/proxy.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Package proxy is middleware that proxies HTTP requests.
-package proxy
-
-import (
- "context"
- "errors"
- "net"
- "net/http"
- "net/url"
- "strings"
- "sync/atomic"
- "time"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Proxy represents a middleware instance that can proxy requests.
-type Proxy struct {
- Next httpserver.Handler
- Upstreams []Upstream
-}
-
-// Upstream manages a pool of proxy upstream hosts.
-type Upstream interface {
- // The path this upstream host should be routed on
- From() string
-
- // Selects an upstream host to be routed to. It
- // should return a suitable upstream host, or nil
- // if no such hosts are available.
- Select(*http.Request) *UpstreamHost
-
- // Checks if subpath is not an ignored path
- AllowedPath(string) bool
-
- // Gets how long to try selecting upstream hosts
- // in the case of cascading failures.
- GetTryDuration() time.Duration
-
- // Gets how long to wait between selecting upstream
- // hosts in the case of cascading failures.
- GetTryInterval() time.Duration
-
- // Gets the number of upstream hosts.
- GetHostCount() int
-
- // Stops the upstream from proxying requests to shutdown goroutines cleanly.
- Stop() error
-}
-
-// UpstreamHostDownFunc can be used to customize how Down behaves.
-type UpstreamHostDownFunc func(*UpstreamHost) bool
-
-// UpstreamHost represents a single proxy upstream
-type UpstreamHost struct {
- // This field is read & written to concurrently, so all access must use
- // atomic operations.
- Conns int64 // must be first field to be 64-bit aligned on 32-bit systems
- MaxConns int64
- Name string // hostname of this upstream host
- UpstreamHeaders http.Header
- DownstreamHeaders http.Header
- FailTimeout time.Duration
- CheckDown UpstreamHostDownFunc
- WithoutPathPrefix string
- ReverseProxy *ReverseProxy
- Fails int32
- // This is an int32 so that we can use atomic operations to do concurrent
- // reads & writes to this value. The default value of 0 indicates that it
- // is healthy and any non-zero value indicates unhealthy.
- Unhealthy int32
-}
-
-// Down checks whether the upstream host is down or not.
-// Down will try to use uh.CheckDown first, and will fall
-// back to some default criteria if necessary.
-func (uh *UpstreamHost) Down() bool {
- if uh.CheckDown == nil {
- // Default settings
- return atomic.LoadInt32(&uh.Unhealthy) != 0 || atomic.LoadInt32(&uh.Fails) > 0
- }
- return uh.CheckDown(uh)
-}
-
-// Full checks whether the upstream host has reached its maximum connections
-func (uh *UpstreamHost) Full() bool {
- return uh.MaxConns > 0 && atomic.LoadInt64(&uh.Conns) >= uh.MaxConns
-}
-
-// Available checks whether the upstream host is available for proxying to
-func (uh *UpstreamHost) Available() bool {
- return !uh.Down() && !uh.Full()
-}
-
-// ServeHTTP satisfies the httpserver.Handler interface.
-func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- // start by selecting most specific matching upstream config
- upstream := p.match(r)
- if upstream == nil {
- return p.Next.ServeHTTP(w, r)
- }
-
- // this replacer is used to fill in header field values
- replacer := httpserver.NewReplacer(r, nil, "")
-
- // outreq is the request that makes a roundtrip to the backend
- outreq, cancel := createUpstreamRequest(w, r)
- defer cancel()
-
- // If we have more than one upstream host defined and if retrying is enabled
- // by setting try_duration to a non-zero value, caddy will try to
- // retry the request at a different host if the first one failed.
- //
- // This requires us to possibly rewind and replay the request body though,
- // which in turn requires us to buffer the request body first.
- //
- // An unbuffered request is usually preferrable, because it reduces latency
- // as well as memory usage. Furthermore it enables different kinds of
- // HTTP streaming applications like gRPC for instance.
- requiresBuffering := upstream.GetHostCount() > 1 && upstream.GetTryDuration() != 0
-
- if requiresBuffering {
- body, err := newBufferedBody(outreq.Body)
- if err != nil {
- return http.StatusBadRequest, errors.New("failed to read downstream request body")
- }
- if body != nil {
- outreq.Body = body
- }
- }
-
- // The keepRetrying function will return true if we should
- // loop and try to select another host, or false if we
- // should break and stop retrying.
- start := time.Now()
- keepRetrying := func(backendErr error) bool {
- // if downstream has canceled the request, break
- if backendErr == context.Canceled {
- return false
- }
- // if we've tried long enough, break
- if time.Since(start) >= upstream.GetTryDuration() {
- return false
- }
- // otherwise, wait and try the next available host
- time.Sleep(upstream.GetTryInterval())
- return true
- }
-
- var backendErr error
- for {
- // since Select() should give us "up" hosts, keep retrying
- // hosts until timeout (or until we get a nil host).
- host := upstream.Select(r)
- if host == nil {
- if backendErr == nil {
- backendErr = errors.New("no hosts available upstream")
- }
- if !keepRetrying(backendErr) {
- break
- }
- continue
- }
- if rr, ok := w.(*httpserver.ResponseRecorder); ok && rr.Replacer != nil {
- rr.Replacer.Set("upstream", host.Name)
- }
-
- proxy := host.ReverseProxy
-
- // a backend's name may contain more than just the host,
- // so we parse it as a URL to try to isolate the host.
- if nameURL, err := url.Parse(host.Name); err == nil {
- outreq.Host = nameURL.Host
- if proxy == nil {
- proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, http.DefaultMaxIdleConnsPerHost)
- }
-
- // use upstream credentials by default
- if outreq.Header.Get("Authorization") == "" && nameURL.User != nil {
- pwd, _ := nameURL.User.Password()
- outreq.SetBasicAuth(nameURL.User.Username(), pwd)
- }
- } else {
- outreq.Host = host.Name
- }
- if proxy == nil {
- return http.StatusInternalServerError, errors.New("proxy for host '" + host.Name + "' is nil")
- }
-
- // set headers for request going upstream
- if host.UpstreamHeaders != nil {
- // modify headers for request that will be sent to the upstream host
- mutateHeadersByRules(outreq.Header, host.UpstreamHeaders, replacer)
- if hostHeaders, ok := outreq.Header["Host"]; ok && len(hostHeaders) > 0 {
- outreq.Host = hostHeaders[len(hostHeaders)-1]
- }
- }
-
- // prepare a function that will update response
- // headers coming back downstream
- var downHeaderUpdateFn respUpdateFn
- if host.DownstreamHeaders != nil {
- downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer)
- }
-
- // Before we retry the request we have to make sure
- // that the body is rewound to it's beginning.
- if bb, ok := outreq.Body.(*bufferedBody); ok {
- if err := bb.rewind(); err != nil {
- return http.StatusInternalServerError, errors.New("unable to rewind downstream request body")
- }
- }
-
- // tell the proxy to serve the request
- //
- // NOTE:
- // The call to proxy.ServeHTTP can theoretically panic.
- // To prevent host.Conns from getting out-of-sync we thus have to
- // make sure that it's _always_ correctly decremented afterwards.
- func() {
- atomic.AddInt64(&host.Conns, 1)
- defer atomic.AddInt64(&host.Conns, -1)
- backendErr = proxy.ServeHTTP(w, outreq, downHeaderUpdateFn)
- }()
-
- // if no errors, we're done here
- if backendErr == nil {
- return 0, nil
- }
-
- if backendErr == httpserver.ErrMaxBytesExceeded {
- return http.StatusRequestEntityTooLarge, backendErr
- }
-
- // failover; remember this failure for some time if
- // request failure counting is enabled
- timeout := host.FailTimeout
- if timeout > 0 {
- atomic.AddInt32(&host.Fails, 1)
- go func(host *UpstreamHost, timeout time.Duration) {
- time.Sleep(timeout)
- atomic.AddInt32(&host.Fails, -1)
- }(host, timeout)
- }
-
- // if we've tried long enough, break
- if !keepRetrying(backendErr) {
- break
- }
- }
-
- return http.StatusBadGateway, backendErr
-}
-
-// match finds the best match for a proxy config based on r.
-func (p Proxy) match(r *http.Request) Upstream {
- var u Upstream
- var longestMatch int
- for _, upstream := range p.Upstreams {
- basePath := upstream.From()
- if !httpserver.Path(r.URL.Path).Matches(basePath) || !upstream.AllowedPath(r.URL.Path) {
- continue
- }
- if len(basePath) > longestMatch {
- longestMatch = len(basePath)
- u = upstream
- }
- }
- return u
-}
-
-// createUpstremRequest shallow-copies r into a new request
-// that can be sent upstream.
-//
-// Derived from reverseproxy.go in the standard Go httputil package.
-func createUpstreamRequest(rw http.ResponseWriter, r *http.Request) (*http.Request, context.CancelFunc) {
- // Original incoming server request may be canceled by the
- // user or by std lib(e.g. too many idle connections).
- ctx, cancel := context.WithCancel(r.Context())
- if cn, ok := rw.(http.CloseNotifier); ok {
- notifyChan := cn.CloseNotify()
- go func() {
- select {
- case <-notifyChan:
- cancel()
- case <-ctx.Done():
- }
- }()
- }
-
- outreq := r.WithContext(ctx) // includes shallow copies of maps, but okay
-
- // We should set body to nil explicitly if request body is empty.
- // For server requests the Request Body is always non-nil.
- if r.ContentLength == 0 {
- outreq.Body = nil
- }
-
- // We are modifying the same underlying map from req (shallow
- // copied above) so we only copy it if necessary.
- copiedHeaders := false
-
- // Remove hop-by-hop headers listed in the "Connection" header.
- // See RFC 2616, section 14.10.
- if c := outreq.Header.Get("Connection"); c != "" {
- for _, f := range strings.Split(c, ",") {
- if f = strings.TrimSpace(f); f != "" {
- if !copiedHeaders {
- outreq.Header = make(http.Header)
- copyHeader(outreq.Header, r.Header)
- copiedHeaders = true
- }
- outreq.Header.Del(f)
- }
- }
- }
-
- // Remove hop-by-hop headers to the backend. Especially
- // important is "Connection" because we want a persistent
- // connection, regardless of what the client sent to us.
- for _, h := range hopHeaders {
- if outreq.Header.Get(h) != "" {
- if !copiedHeaders {
- outreq.Header = make(http.Header)
- copyHeader(outreq.Header, r.Header)
- copiedHeaders = true
- }
- outreq.Header.Del(h)
- }
- }
-
- if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
- // If we aren't the first proxy, retain prior
- // X-Forwarded-For information as a comma+space
- // separated list and fold multiple headers into one.
- if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
- clientIP = strings.Join(prior, ", ") + ", " + clientIP
- }
- outreq.Header.Set("X-Forwarded-For", clientIP)
- }
-
- return outreq, cancel
-}
-
-func createRespHeaderUpdateFn(rules http.Header, replacer httpserver.Replacer) respUpdateFn {
- return func(resp *http.Response) {
- mutateHeadersByRules(resp.Header, rules, replacer)
- }
-}
-
-func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer) {
- for ruleField, ruleValues := range rules {
- if strings.HasPrefix(ruleField, "+") {
- for _, ruleValue := range ruleValues {
- replacement := repl.Replace(ruleValue)
- if len(replacement) > 0 {
- headers.Add(strings.TrimPrefix(ruleField, "+"), replacement)
- }
- }
- } else if strings.HasPrefix(ruleField, "-") {
- headers.Del(strings.TrimPrefix(ruleField, "-"))
- } else if len(ruleValues) > 0 {
- replacement := repl.Replace(ruleValues[len(ruleValues)-1])
- if len(replacement) > 0 {
- headers.Set(ruleField, replacement)
- }
- }
- }
-}
diff --git a/caddyhttp/proxy/proxy_test.go b/caddyhttp/proxy/proxy_test.go
deleted file mode 100644
index d7342560822..00000000000
--- a/caddyhttp/proxy/proxy_test.go
+++ /dev/null
@@ -1,1472 +0,0 @@
-package proxy
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/mholt/caddy/caddyfile"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-
- "golang.org/x/net/websocket"
-)
-
-// This is a simple wrapper around httptest.NewTLSServer()
-// which forcefully enables (among others) HTTP/2 support.
-// The httptest package only supports HTTP/1.1 by default.
-func newTLSServer(handler http.Handler) *httptest.Server {
- ts := httptest.NewUnstartedServer(handler)
- ts.TLS = new(tls.Config)
- ts.TLS.NextProtos = []string{"h2"}
- ts.StartTLS()
- return ts
-}
-
-func TestReverseProxy(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- testHeaderValue := []string{"header-value"}
- testHeaders := http.Header{
- "X-Header-1": testHeaderValue,
- "X-Header-2": testHeaderValue,
- "X-Header-3": testHeaderValue,
- }
- testTrailerValue := []string{"trailer-value"}
- testTrailers := http.Header{
- "X-Trailer-1": testTrailerValue,
- "X-Trailer-2": testTrailerValue,
- "X-Trailer-3": testTrailerValue,
- }
- verifyHeaderValues := func(actual http.Header, expected http.Header) bool {
- if actual == nil {
- t.Error("Expected headers")
- return true
- }
-
- for k := range expected {
- if expected.Get(k) != actual.Get(k) {
- t.Errorf("Expected header '%s' to be proxied properly", k)
- return true
- }
- }
-
- return false
- }
- verifyHeadersTrailers := func(headers http.Header, trailers http.Header) {
- if verifyHeaderValues(headers, testHeaders) || verifyHeaderValues(trailers, testTrailers) {
- t.FailNow()
- }
- }
-
- requestReceived := false
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // read the body (even if it's empty) to make Go parse trailers
- io.Copy(ioutil.Discard, r.Body)
-
- verifyHeadersTrailers(r.Header, r.Trailer)
- requestReceived = true
-
- // Set headers.
- copyHeader(w.Header(), testHeaders)
-
- // Only announce one of the trailers to test wether
- // unannounced trailers are proxied correctly.
- for k := range testTrailers {
- w.Header().Set("Trailer", k)
- break
- }
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("Hello, client"))
-
- // Set trailers.
- shallowCopyTrailers(w.Header(), testTrailers, true)
- }))
- defer backend.Close()
-
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{newFakeUpstream(backend.URL, false)},
- }
-
- // Create the fake request body.
- // This will copy "trailersToSet" to r.Trailer right before it is closed and
- // thus test for us wether unannounced client trailers are proxied correctly.
- body := &trailerTestStringReader{
- Reader: *strings.NewReader("test"),
- trailersToSet: testTrailers,
- }
-
- // Create the fake request with the above body.
- r := httptest.NewRequest("GET", "/", body)
- r.Trailer = make(http.Header)
- body.request = r
-
- copyHeader(r.Header, testHeaders)
-
- // Only announce one of the trailers to test wether
- // unannounced trailers are proxied correctly.
- for k, v := range testTrailers {
- r.Trailer[k] = v
- break
- }
-
- w := httptest.NewRecorder()
- p.ServeHTTP(w, r)
- res := w.Result()
-
- if !requestReceived {
- t.Error("Expected backend to receive request, but it didn't")
- }
-
- verifyHeadersTrailers(res.Header, res.Trailer)
-
- // Make sure {upstream} placeholder is set
- r.Body = ioutil.NopCloser(strings.NewReader("test"))
- rr := httpserver.NewResponseRecorder(testResponseRecorder{
- ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: httptest.NewRecorder()},
- })
- rr.Replacer = httpserver.NewReplacer(r, rr, "-")
-
- p.ServeHTTP(rr, r)
-
- if got, want := rr.Replacer.Replace("{upstream}"), backend.URL; got != want {
- t.Errorf("Expected custom placeholder {upstream} to be set (%s), but it wasn't; got: %s", want, got)
- }
-}
-
-// trailerTestStringReader is used to test unannounced trailers coming
-// from a client which should properly be proxied to the upstream.
-type trailerTestStringReader struct {
- strings.Reader
- request *http.Request
- trailersToSet http.Header
-}
-
-var _ io.ReadCloser = &trailerTestStringReader{}
-
-func (r *trailerTestStringReader) Close() error {
- copyHeader(r.request.Trailer, r.trailersToSet)
- return nil
-}
-
-func TestReverseProxyInsecureSkipVerify(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- var requestReceived bool
- var requestWasHTTP2 bool
- backend := newTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- requestReceived = true
- requestWasHTTP2 = r.ProtoAtLeast(2, 0)
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{newFakeUpstream(backend.URL, true)},
- }
-
- // create request and response recorder
- r := httptest.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
-
- p.ServeHTTP(w, r)
-
- if !requestReceived {
- t.Error("Even with insecure HTTPS, expected backend to receive request, but it didn't")
- }
- if !requestWasHTTP2 {
- t.Error("Even with insecure HTTPS, expected proxy to use HTTP/2")
- }
-}
-
-// This test will fail when using the race detector without atomic reads &
-// writes of UpstreamHost.Conns and UpstreamHost.Unhealthy.
-func TestReverseProxyMaxConnLimit(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- const MaxTestConns = 2
- connReceived := make(chan bool, MaxTestConns)
- connContinue := make(chan bool)
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- connReceived <- true
- <-connContinue
- }))
- defer backend.Close()
-
- su, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(`
- proxy / `+backend.URL+` {
- max_conns `+fmt.Sprint(MaxTestConns)+`
- }
- `)), "")
- if err != nil {
- t.Fatal(err)
- }
-
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: su,
- }
-
- var jobs sync.WaitGroup
-
- for i := 0; i < MaxTestConns; i++ {
- jobs.Add(1)
- go func(i int) {
- defer jobs.Done()
- w := httptest.NewRecorder()
- code, err := p.ServeHTTP(w, httptest.NewRequest("GET", "/", nil))
- if err != nil {
- t.Errorf("Request %d failed: %v", i, err)
- } else if code != 0 {
- t.Errorf("Bad return code for request %d: %d", i, code)
- } else if w.Code != 200 {
- t.Errorf("Bad statuc code for request %d: %d", i, w.Code)
- }
- }(i)
- }
- // Wait for all the requests to hit the backend.
- for i := 0; i < MaxTestConns; i++ {
- <-connReceived
- }
-
- // Now we should have MaxTestConns requests connected and sitting on the backend
- // server. Verify that the next request is rejected.
- w := httptest.NewRecorder()
- code, err := p.ServeHTTP(w, httptest.NewRequest("GET", "/", nil))
- if code != http.StatusBadGateway {
- t.Errorf("Expected request to be rejected, but got: %d [%v]\nStatus code: %d",
- code, err, w.Code)
- }
-
- // Now let all the requests complete and verify the status codes for those:
- close(connContinue)
-
- // Wait for the initial requests to finish and check their results.
- jobs.Wait()
-}
-
-func TestWebSocketReverseProxyNonHijackerPanic(t *testing.T) {
- // Capture the expected panic
- defer func() {
- r := recover()
- if _, ok := r.(httpserver.NonHijackerError); !ok {
- t.Error("not get the expected panic")
- }
- }()
-
- var connCount int32
- wsNop := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) { atomic.AddInt32(&connCount, 1) }))
- defer wsNop.Close()
-
- // Get proxy to use for the test
- p := newWebSocketTestProxy(wsNop.URL, false)
-
- // Create client request
- r := httptest.NewRequest("GET", "/", nil)
-
- r.Header = http.Header{
- "Connection": {"Upgrade"},
- "Upgrade": {"websocket"},
- "Origin": {wsNop.URL},
- "Sec-WebSocket-Key": {"x3JJHMbDL1EzLkh9GBhXDw=="},
- "Sec-WebSocket-Version": {"13"},
- }
-
- nonHijacker := httptest.NewRecorder()
- p.ServeHTTP(nonHijacker, r)
-}
-
-func TestWebSocketReverseProxyServeHTTPHandler(t *testing.T) {
- // No-op websocket backend simply allows the WS connection to be
- // accepted then it will be immediately closed. Perfect for testing.
- accepted := make(chan struct{})
- wsNop := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) { close(accepted) }))
- defer wsNop.Close()
-
- // Get proxy to use for the test
- p := newWebSocketTestProxy(wsNop.URL, false)
-
- // Create client request
- r := httptest.NewRequest("GET", "/", nil)
-
- r.Header = http.Header{
- "Connection": {"Upgrade"},
- "Upgrade": {"websocket"},
- "Origin": {wsNop.URL},
- "Sec-WebSocket-Key": {"x3JJHMbDL1EzLkh9GBhXDw=="},
- "Sec-WebSocket-Version": {"13"},
- }
-
- // Capture the request
- w := &recorderHijacker{httptest.NewRecorder(), new(fakeConn)}
-
- // Booya! Do the test.
- p.ServeHTTP(w, r)
-
- // Make sure the backend accepted the WS connection.
- // Mostly interested in the Upgrade and Connection response headers
- // and the 101 status code.
- expected := []byte("HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=\r\n\r\n")
- actual := w.fakeConn.writeBuf.Bytes()
- if !bytes.Equal(actual, expected) {
- t.Errorf("Expected backend to accept response:\n'%s'\nActually got:\n'%s'", expected, actual)
- }
-
- // wait a minute for backend handling, see issue 1654.
- time.Sleep(10 * time.Millisecond)
-
- select {
- case <-accepted:
- default:
- t.Error("Expect a accepted websocket connection, but not")
- }
-}
-
-func TestWebSocketReverseProxyFromWSClient(t *testing.T) {
- // Echo server allows us to test that socket bytes are properly
- // being proxied.
- wsEcho := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) {
- io.Copy(ws, ws)
- }))
- defer wsEcho.Close()
-
- // Get proxy to use for the test
- p := newWebSocketTestProxy(wsEcho.URL, false)
-
- // This is a full end-end test, so the proxy handler
- // has to be part of a server listening on a port. Our
- // WS client will connect to this test server, not
- // the echo client directly.
- echoProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- p.ServeHTTP(w, r)
- }))
- defer echoProxy.Close()
-
- // Set up WebSocket client
- url := strings.Replace(echoProxy.URL, "http://", "ws://", 1)
- ws, err := websocket.Dial(url, "", echoProxy.URL)
-
- if err != nil {
- t.Fatal(err)
- }
- defer ws.Close()
-
- // Send test message
- trialMsg := "Is it working?"
-
- if sendErr := websocket.Message.Send(ws, trialMsg); sendErr != nil {
- t.Fatal(sendErr)
- }
-
- // It should be echoed back to us
- var actualMsg string
-
- if rcvErr := websocket.Message.Receive(ws, &actualMsg); rcvErr != nil {
- t.Fatal(rcvErr)
- }
-
- if actualMsg != trialMsg {
- t.Errorf("Expected '%s' but got '%s' instead", trialMsg, actualMsg)
- }
-}
-
-func TestWebSocketReverseProxyFromWSSClient(t *testing.T) {
- wsEcho := newTLSServer(websocket.Handler(func(ws *websocket.Conn) {
- io.Copy(ws, ws)
- }))
- defer wsEcho.Close()
-
- p := newWebSocketTestProxy(wsEcho.URL, true)
-
- echoProxy := newTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- p.ServeHTTP(w, r)
- }))
- defer echoProxy.Close()
-
- // Set up WebSocket client
- url := strings.Replace(echoProxy.URL, "https://", "wss://", 1)
- wsCfg, err := websocket.NewConfig(url, echoProxy.URL)
- if err != nil {
- t.Fatal(err)
- }
- wsCfg.TlsConfig = &tls.Config{InsecureSkipVerify: true}
- ws, err := websocket.DialConfig(wsCfg)
-
- if err != nil {
- t.Fatal(err)
- }
- defer ws.Close()
-
- // Send test message
- trialMsg := "Is it working?"
-
- if sendErr := websocket.Message.Send(ws, trialMsg); sendErr != nil {
- t.Fatal(sendErr)
- }
-
- // It should be echoed back to us
- var actualMsg string
-
- if rcvErr := websocket.Message.Receive(ws, &actualMsg); rcvErr != nil {
- t.Fatal(rcvErr)
- }
-
- if actualMsg != trialMsg {
- t.Errorf("Expected '%s' but got '%s' instead", trialMsg, actualMsg)
- }
-}
-
-func TestUnixSocketProxy(t *testing.T) {
- if runtime.GOOS == "windows" {
- return
- }
-
- trialMsg := "Is it working?"
-
- var proxySuccess bool
-
- // This is our fake "application" we want to proxy to
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Request was proxied when this is called
- proxySuccess = true
-
- fmt.Fprint(w, trialMsg)
- }))
-
- // Get absolute path for unix: socket
- dir, err := ioutil.TempDir("", "caddy_proxytest")
- if err != nil {
- t.Fatalf("Failed to make temp dir to contain unix socket. %v", err)
- }
- defer os.RemoveAll(dir)
- socketPath := filepath.Join(dir, "test_socket")
-
- // Change httptest.Server listener to listen to unix: socket
- ln, err := net.Listen("unix", socketPath)
- if err != nil {
- t.Fatalf("Unable to listen: %v", err)
- }
- ts.Listener = ln
-
- ts.Start()
- defer ts.Close()
-
- url := strings.Replace(ts.URL, "http://", "unix:", 1)
- p := newWebSocketTestProxy(url, false)
-
- echoProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- p.ServeHTTP(w, r)
- }))
- defer echoProxy.Close()
-
- res, err := http.Get(echoProxy.URL)
- if err != nil {
- t.Fatalf("Unable to GET: %v", err)
- }
-
- greeting, err := ioutil.ReadAll(res.Body)
- res.Body.Close()
- if err != nil {
- t.Fatalf("Unable to GET: %v", err)
- }
-
- actualMsg := fmt.Sprintf("%s", greeting)
-
- if !proxySuccess {
- t.Errorf("Expected request to be proxied, but it wasn't")
- }
-
- if actualMsg != trialMsg {
- t.Errorf("Expected '%s' but got '%s' instead", trialMsg, actualMsg)
- }
-}
-
-func GetHTTPProxy(messageFormat string, prefix string) (*Proxy, *httptest.Server) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, messageFormat, r.URL.String())
- }))
-
- return newPrefixedWebSocketTestProxy(ts.URL, prefix), ts
-}
-
-func GetSocketProxy(messageFormat string, prefix string) (*Proxy, *httptest.Server, string, error) {
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, messageFormat, r.URL.String())
- }))
-
- dir, err := ioutil.TempDir("", "caddy_proxytest")
- if err != nil {
- return nil, nil, dir, fmt.Errorf("Failed to make temp dir to contain unix socket. %v", err)
- }
- socketPath := filepath.Join(dir, "test_socket")
-
- ln, err := net.Listen("unix", socketPath)
- if err != nil {
- os.RemoveAll(dir)
- return nil, nil, dir, fmt.Errorf("Unable to listen: %v", err)
- }
- ts.Listener = ln
-
- ts.Start()
-
- tsURL := strings.Replace(ts.URL, "http://", "unix:", 1)
-
- return newPrefixedWebSocketTestProxy(tsURL, prefix), ts, dir, nil
-}
-
-func GetTestServerMessage(p *Proxy, ts *httptest.Server, path string) (string, error) {
- echoProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- p.ServeHTTP(w, r)
- }))
-
- // *httptest.Server is passed so it can be `defer`red properly
- defer ts.Close()
- defer echoProxy.Close()
-
- res, err := http.Get(echoProxy.URL + path)
- if err != nil {
- return "", fmt.Errorf("Unable to GET: %v", err)
- }
-
- greeting, err := ioutil.ReadAll(res.Body)
- res.Body.Close()
- if err != nil {
- return "", fmt.Errorf("Unable to read body: %v", err)
- }
-
- return fmt.Sprintf("%s", greeting), nil
-}
-
-func TestUnixSocketProxyPaths(t *testing.T) {
- greeting := "Hello route %s"
-
- tests := []struct {
- url string
- prefix string
- expected string
- }{
- {"", "", fmt.Sprintf(greeting, "/")},
- {"/hello", "", fmt.Sprintf(greeting, "/hello")},
- {"/foo/bar", "", fmt.Sprintf(greeting, "/foo/bar")},
- {"/foo?bar", "", fmt.Sprintf(greeting, "/foo?bar")},
- {"/greet?name=john", "", fmt.Sprintf(greeting, "/greet?name=john")},
- {"/world?wonderful&colorful", "", fmt.Sprintf(greeting, "/world?wonderful&colorful")},
- {"/proxy/hello", "/proxy", fmt.Sprintf(greeting, "/hello")},
- {"/proxy/foo/bar", "/proxy", fmt.Sprintf(greeting, "/foo/bar")},
- {"/proxy/?foo=bar", "/proxy", fmt.Sprintf(greeting, "/?foo=bar")},
- {"/queues/%2F/fetchtasks", "", fmt.Sprintf(greeting, "/queues/%2F/fetchtasks")},
- {"/queues/%2F/fetchtasks?foo=bar", "", fmt.Sprintf(greeting, "/queues/%2F/fetchtasks?foo=bar")},
- }
-
- for _, test := range tests {
- p, ts := GetHTTPProxy(greeting, test.prefix)
-
- actualMsg, err := GetTestServerMessage(p, ts, test.url)
-
- if err != nil {
- t.Fatalf("Getting server message failed - %v", err)
- }
-
- if actualMsg != test.expected {
- t.Errorf("Expected '%s' but got '%s' instead", test.expected, actualMsg)
- }
- }
-
- if runtime.GOOS == "windows" {
- return
- }
-
- for _, test := range tests {
- p, ts, tmpdir, err := GetSocketProxy(greeting, test.prefix)
- if err != nil {
- t.Fatalf("Getting socket proxy failed - %v", err)
- }
-
- actualMsg, err := GetTestServerMessage(p, ts, test.url)
-
- if err != nil {
- os.RemoveAll(tmpdir)
- t.Fatalf("Getting server message failed - %v", err)
- }
-
- if actualMsg != test.expected {
- t.Errorf("Expected '%s' but got '%s' instead", test.expected, actualMsg)
- }
-
- os.RemoveAll(tmpdir)
- }
-}
-
-func TestUpstreamHeadersUpdate(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- var actualHeaders http.Header
- var actualHost string
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Hello, client"))
- actualHeaders = r.Header
- actualHost = r.Host
- }))
- defer backend.Close()
-
- upstream := newFakeUpstream(backend.URL, false)
- upstream.host.UpstreamHeaders = http.Header{
- "Connection": {"{>Connection}"},
- "Upgrade": {"{>Upgrade}"},
- "+Merge-Me": {"Merge-Value"},
- "+Add-Me": {"Add-Value"},
- "+Add-Empty": {"{}"},
- "-Remove-Me": {""},
- "Replace-Me": {"{hostname}"},
- "Clear-Me": {""},
- "Host": {"{>Host}"},
- }
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{upstream},
- }
-
- // create request and response recorder
- r := httptest.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
-
- const expectHost = "example.com"
- //add initial headers
- r.Header.Add("Merge-Me", "Initial")
- r.Header.Add("Remove-Me", "Remove-Value")
- r.Header.Add("Replace-Me", "Replace-Value")
- r.Header.Add("Host", expectHost)
-
- p.ServeHTTP(w, r)
-
- replacer := httpserver.NewReplacer(r, nil, "")
-
- for headerKey, expect := range map[string][]string{
- "Merge-Me": {"Initial", "Merge-Value"},
- "Add-Me": {"Add-Value"},
- "Add-Empty": nil,
- "Remove-Me": nil,
- "Replace-Me": {replacer.Replace("{hostname}")},
- "Clear-Me": nil,
- } {
- if got := actualHeaders[headerKey]; !reflect.DeepEqual(got, expect) {
- t.Errorf("Upstream request does not contain expected %v header: expect %v, but got %v",
- headerKey, expect, got)
- }
- }
-
- if actualHost != expectHost {
- t.Errorf("Request sent to upstream backend should have value of Host with %s, but got %s", expectHost, actualHost)
- }
-
-}
-
-func TestDownstreamHeadersUpdate(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add("Merge-Me", "Initial")
- w.Header().Add("Remove-Me", "Remove-Value")
- w.Header().Add("Replace-Me", "Replace-Value")
- w.Header().Add("Content-Type", "text/html")
- w.Header().Add("Overwrite-Me", "Overwrite-Value")
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- upstream := newFakeUpstream(backend.URL, false)
- upstream.host.DownstreamHeaders = http.Header{
- "+Merge-Me": {"Merge-Value"},
- "+Add-Me": {"Add-Value"},
- "-Remove-Me": {""},
- "Replace-Me": {"{hostname}"},
- }
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{upstream},
- }
-
- // create request and response recorder
- r := httptest.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
- // set a predefined skip header
- w.Header().Set("Content-Type", "text/css")
- // set a predefined overwritten header
- w.Header().Set("Overwrite-Me", "Initial")
-
- p.ServeHTTP(w, r)
-
- replacer := httpserver.NewReplacer(r, nil, "")
- actualHeaders := w.Header()
-
- for headerKey, expect := range map[string][]string{
- "Merge-Me": {"Initial", "Merge-Value"},
- "Add-Me": {"Add-Value"},
- "Remove-Me": nil,
- "Replace-Me": {replacer.Replace("{hostname}")},
- "Content-Type": {"text/css"},
- "Overwrite-Me": {"Overwrite-Value"},
- } {
- if got := actualHeaders[headerKey]; !reflect.DeepEqual(got, expect) {
- t.Errorf("Downstream response does not contain expected %s header: expect %v, but got %v",
- headerKey, expect, got)
- }
- }
-}
-
-var (
- upstreamResp1 = []byte("Hello, /")
- upstreamResp2 = []byte("Hello, /api/")
-)
-
-func newMultiHostTestProxy() *Proxy {
- // No-op backends.
- upstreamServer1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "%s", upstreamResp1)
- }))
- upstreamServer2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "%s", upstreamResp2)
- }))
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{
- // The order is important; the short path should go first to ensure
- // we choose the most specific route, not the first one.
- &fakeUpstream{
- name: upstreamServer1.URL,
- from: "/",
- },
- &fakeUpstream{
- name: upstreamServer2.URL,
- from: "/api",
- },
- },
- }
- return p
-}
-
-func TestMultiReverseProxyFromClient(t *testing.T) {
- p := newMultiHostTestProxy()
-
- // This is a full end-end test, so the proxy handler.
- proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- p.ServeHTTP(w, r)
- }))
- defer proxy.Close()
-
- // Table tests.
- var multiProxy = []struct {
- url string
- body []byte
- }{
- {
- "/",
- upstreamResp1,
- },
- {
- "/api/",
- upstreamResp2,
- },
- {
- "/messages/",
- upstreamResp1,
- },
- {
- "/api/messages/?text=cat",
- upstreamResp2,
- },
- }
-
- for _, tt := range multiProxy {
- // Create client request
- reqURL := proxy.URL + tt.url
- req, err := http.NewRequest("GET", reqURL, nil)
-
- if err != nil {
- t.Fatalf("Failed to make request: %v", err)
- }
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatalf("Failed to make request: %v", err)
- }
- body, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- t.Fatalf("Failed to read response: %v", err)
- }
-
- if !bytes.Equal(body, tt.body) {
- t.Errorf("Expected '%s' but got '%s' instead", tt.body, body)
- }
- }
-}
-
-func TestHostSimpleProxyNoHeaderForward(t *testing.T) {
- var requestHost string
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- requestHost = r.Host
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{newFakeUpstream(backend.URL, false)},
- }
-
- r := httptest.NewRequest("GET", "/", nil)
- r.Host = "test.com"
-
- w := httptest.NewRecorder()
-
- p.ServeHTTP(w, r)
-
- if !strings.Contains(backend.URL, "//") {
- t.Fatalf("The URL of the backend server doesn't contains //: %s", backend.URL)
- }
-
- expectedHost := strings.Split(backend.URL, "//")
- if expectedHost[1] != requestHost {
- t.Fatalf("Expected %s as a Host header got %s\n", expectedHost[1], requestHost)
- }
-}
-
-func TestHostHeaderReplacedUsingForward(t *testing.T) {
- var requestHost string
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- requestHost = r.Host
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- upstream := newFakeUpstream(backend.URL, false)
- proxyHostHeader := "test2.com"
- upstream.host.UpstreamHeaders = http.Header{"Host": []string{proxyHostHeader}}
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{upstream},
- }
-
- r := httptest.NewRequest("GET", "/", nil)
- r.Host = "test.com"
-
- w := httptest.NewRecorder()
-
- p.ServeHTTP(w, r)
-
- if proxyHostHeader != requestHost {
- t.Fatalf("Expected %s as a Host header got %s\n", proxyHostHeader, requestHost)
- }
-}
-
-func TestBasicAuth(t *testing.T) {
- basicAuthTestcase(t, nil, nil)
- basicAuthTestcase(t, nil, url.UserPassword("username", "password"))
- basicAuthTestcase(t, url.UserPassword("usename", "password"), nil)
- basicAuthTestcase(t, url.UserPassword("unused", "unused"),
- url.UserPassword("username", "password"))
-}
-
-func basicAuthTestcase(t *testing.T, upstreamUser, clientUser *url.Userinfo) {
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- u, p, ok := r.BasicAuth()
-
- if ok {
- w.Write([]byte(u))
- }
- if ok && p != "" {
- w.Write([]byte(":"))
- w.Write([]byte(p))
- }
- }))
- defer backend.Close()
-
- backURL, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatalf("Failed to parse URL: %v", err)
- }
- backURL.User = upstreamUser
-
- p := &Proxy{
- Next: httpserver.EmptyNext,
- Upstreams: []Upstream{newFakeUpstream(backURL.String(), false)},
- }
- r, err := http.NewRequest("GET", "/foo", nil)
- if err != nil {
- t.Fatalf("Failed to create request: %v", err)
- }
- if clientUser != nil {
- u := clientUser.Username()
- p, _ := clientUser.Password()
- r.SetBasicAuth(u, p)
- }
- w := httptest.NewRecorder()
-
- p.ServeHTTP(w, r)
-
- if w.Code != 200 {
- t.Fatalf("Invalid response code: %d", w.Code)
- }
- body, _ := ioutil.ReadAll(w.Body)
-
- if clientUser != nil {
- if string(body) != clientUser.String() {
- t.Fatalf("Invalid auth info: %s", string(body))
- }
- } else {
- if upstreamUser != nil {
- if string(body) != upstreamUser.String() {
- t.Fatalf("Invalid auth info: %s", string(body))
- }
- } else {
- if string(body) != "" {
- t.Fatalf("Invalid auth info: %s", string(body))
- }
- }
- }
-}
-
-func TestProxyDirectorURL(t *testing.T) {
- for i, c := range []struct {
- requestURL string
- targetURL string
- without string
- expectURL string
- }{
- {
- requestURL: `http://localhost:2020/test`,
- targetURL: `https://localhost:2021`,
- expectURL: `https://localhost:2021/test`,
- },
- {
- requestURL: `http://localhost:2020/test`,
- targetURL: `https://localhost:2021/t`,
- expectURL: `https://localhost:2021/t/test`,
- },
- {
- requestURL: `http://localhost:2020/test?t=w`,
- targetURL: `https://localhost:2021/t`,
- expectURL: `https://localhost:2021/t/test?t=w`,
- },
- {
- requestURL: `http://localhost:2020/test`,
- targetURL: `https://localhost:2021/t?foo=bar`,
- expectURL: `https://localhost:2021/t/test?foo=bar`,
- },
- {
- requestURL: `http://localhost:2020/test?t=w`,
- targetURL: `https://localhost:2021/t?foo=bar`,
- expectURL: `https://localhost:2021/t/test?foo=bar&t=w`,
- },
- {
- requestURL: `http://localhost:2020/test?t=w`,
- targetURL: `https://localhost:2021/t?foo=bar`,
- expectURL: `https://localhost:2021/t?foo=bar&t=w`,
- without: "/test",
- },
- {
- requestURL: `http://localhost:2020/test?t%3dw`,
- targetURL: `https://localhost:2021/t?foo%3dbar`,
- expectURL: `https://localhost:2021/t?foo%3dbar&t%3dw`,
- without: "/test",
- },
- {
- requestURL: `http://localhost:2020/test/`,
- targetURL: `https://localhost:2021/t/`,
- expectURL: `https://localhost:2021/t/test/`,
- },
- {
- requestURL: `http://localhost:2020/test/mypath`,
- targetURL: `https://localhost:2021/t/`,
- expectURL: `https://localhost:2021/t/mypath`,
- without: "/test",
- },
- {
- requestURL: `http://localhost:2020/%2C`,
- targetURL: `https://localhost:2021/t/`,
- expectURL: `https://localhost:2021/t/%2C`,
- },
- {
- requestURL: `http://localhost:2020/%2C/`,
- targetURL: `https://localhost:2021/t/`,
- expectURL: `https://localhost:2021/t/%2C/`,
- },
- {
- requestURL: `http://localhost:2020/test`,
- targetURL: `https://localhost:2021/%2C`,
- expectURL: `https://localhost:2021/%2C/test`,
- },
- {
- requestURL: `http://localhost:2020/%2C`,
- targetURL: `https://localhost:2021/%2C`,
- expectURL: `https://localhost:2021/%2C/%2C`,
- },
- {
- requestURL: `http://localhost:2020/%2F/test`,
- targetURL: `https://localhost:2021/`,
- expectURL: `https://localhost:2021/%2F/test`,
- },
- {
- requestURL: `http://localhost:2020/test/%2F/mypath`,
- targetURL: `https://localhost:2021/t/`,
- expectURL: `https://localhost:2021/t/%2F/mypath`,
- without: "/test",
- },
- } {
- targetURL, err := url.Parse(c.targetURL)
- if err != nil {
- t.Errorf("case %d failed to parse target URL: %s", i, err)
- continue
- }
- req, err := http.NewRequest("GET", c.requestURL, nil)
- if err != nil {
- t.Errorf("case %d failed to create request: %s", i, err)
- continue
- }
-
- NewSingleHostReverseProxy(targetURL, c.without, 0).Director(req)
- if expect, got := c.expectURL, req.URL.String(); expect != got {
- t.Errorf("case %d url not equal: expect %q, but got %q",
- i, expect, got)
- }
- }
-}
-
-func TestReverseProxyRetry(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- // set up proxy
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.Copy(w, r.Body)
- r.Body.Close()
- }))
- defer backend.Close()
-
- su, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(`
- proxy / localhost:65535 localhost:65534 `+backend.URL+` {
- policy round_robin
- fail_timeout 5s
- max_fails 1
- try_duration 5s
- try_interval 250ms
- }
- `)), "")
- if err != nil {
- t.Fatal(err)
- }
-
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: su,
- }
-
- // middle is required to simulate closable downstream request body
- middle := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- _, err = p.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }))
- defer middle.Close()
-
- testcase := "test content"
- r, err := http.NewRequest("POST", middle.URL, bytes.NewBufferString(testcase))
- if err != nil {
- t.Fatal(err)
- }
- resp, err := http.DefaultTransport.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- b, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if string(b) != testcase {
- t.Fatalf("string(b) = %s, want %s", string(b), testcase)
- }
-}
-
-func TestReverseProxyLargeBody(t *testing.T) {
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
-
- // set up proxy
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.Copy(ioutil.Discard, r.Body)
- r.Body.Close()
- }))
- defer backend.Close()
-
- su, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(`proxy / `+backend.URL)), "")
- if err != nil {
- t.Fatal(err)
- }
-
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: su,
- }
-
- // middle is required to simulate closable downstream request body
- middle := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- _, err = p.ServeHTTP(w, r)
- if err != nil {
- t.Error(err)
- }
- }))
- defer middle.Close()
-
- // Our request body will be 100MB
- bodySize := uint64(100 * 1000 * 1000)
-
- // We want to see how much memory the proxy module requires for this request.
- // So lets record the mem stats before we start it.
- begMemstats := &runtime.MemStats{}
- runtime.ReadMemStats(begMemstats)
-
- r, err := http.NewRequest("POST", middle.URL, &noopReader{len: bodySize})
- if err != nil {
- t.Fatal(err)
- }
- resp, err := http.DefaultTransport.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- resp.Body.Close()
-
- // Finally we need the mem stats after the request is done...
- endMemstats := &runtime.MemStats{}
- runtime.ReadMemStats(endMemstats)
-
- // ...to calculate the total amount of allocated memory during the request.
- totalAlloc := endMemstats.TotalAlloc - begMemstats.TotalAlloc
-
- // If that's as much as the size of the body itself it's a serious sign that the
- // request was not "streamed" to the upstream without buffering it first.
- if totalAlloc >= bodySize {
- t.Fatalf("proxy allocated too much memory: %d bytes", totalAlloc)
- }
-}
-
-func TestCancelRequest(t *testing.T) {
- reqInFlight := make(chan struct{})
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- close(reqInFlight) // cause the client to cancel its request
-
- select {
- case <-time.After(10 * time.Second):
- t.Error("Handler never saw CloseNotify")
- return
- case <-w.(http.CloseNotifier).CloseNotify():
- }
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{newFakeUpstream(backend.URL, false)},
- }
-
- // setup request with cancel ctx
- req := httptest.NewRequest("GET", "/", nil)
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- req = req.WithContext(ctx)
-
- // wait for canceling the request
- go func() {
- <-reqInFlight
- cancel()
- }()
-
- rec := httptest.NewRecorder()
- status, err := p.ServeHTTP(rec, req)
- expectedStatus, expectErr := http.StatusBadGateway, context.Canceled
- if status != expectedStatus || err != expectErr {
- t.Errorf("expect proxy handle return status[%d] with error[%v], but got status[%d] with error[%v]",
- expectedStatus, expectErr, status, err)
- }
- if body := rec.Body.String(); body != "" {
- t.Errorf("expect a blank response, but got %q", body)
- }
-}
-
-type noopReader struct {
- len uint64
- pos uint64
-}
-
-var _ io.Reader = &noopReader{}
-
-func (r *noopReader) Read(b []byte) (int, error) {
- if r.pos >= r.len {
- return 0, io.EOF
- }
- n := int(r.len - r.pos)
- if n > len(b) {
- n = len(b)
- }
- for i := range b[:n] {
- b[i] = 0
- }
- r.pos += uint64(n)
- return n, nil
-}
-
-func newFakeUpstream(name string, insecure bool) *fakeUpstream {
- uri, _ := url.Parse(name)
- u := &fakeUpstream{
- name: name,
- from: "/",
- host: &UpstreamHost{
- Name: name,
- ReverseProxy: NewSingleHostReverseProxy(uri, "", http.DefaultMaxIdleConnsPerHost),
- },
- }
- if insecure {
- u.host.ReverseProxy.UseInsecureTransport()
- }
- return u
-}
-
-type fakeUpstream struct {
- name string
- host *UpstreamHost
- from string
- without string
-}
-
-func (u *fakeUpstream) From() string {
- return u.from
-}
-
-func (u *fakeUpstream) Select(r *http.Request) *UpstreamHost {
- if u.host == nil {
- uri, err := url.Parse(u.name)
- if err != nil {
- log.Fatalf("Unable to url.Parse %s: %v", u.name, err)
- }
- u.host = &UpstreamHost{
- Name: u.name,
- ReverseProxy: NewSingleHostReverseProxy(uri, u.without, http.DefaultMaxIdleConnsPerHost),
- }
- }
- return u.host
-}
-
-func (u *fakeUpstream) AllowedPath(requestPath string) bool { return true }
-func (u *fakeUpstream) GetTryDuration() time.Duration { return 1 * time.Second }
-func (u *fakeUpstream) GetTryInterval() time.Duration { return 250 * time.Millisecond }
-func (u *fakeUpstream) GetHostCount() int { return 1 }
-func (u *fakeUpstream) Stop() error { return nil }
-
-// newWebSocketTestProxy returns a test proxy that will
-// redirect to the specified backendAddr. The function
-// also sets up the rules/environment for testing WebSocket
-// proxy.
-func newWebSocketTestProxy(backendAddr string, insecure bool) *Proxy {
- return &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{&fakeWsUpstream{
- name: backendAddr,
- without: "",
- insecure: insecure,
- }},
- }
-}
-
-func newPrefixedWebSocketTestProxy(backendAddr string, prefix string) *Proxy {
- return &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{&fakeWsUpstream{name: backendAddr, without: prefix}},
- }
-}
-
-type fakeWsUpstream struct {
- name string
- without string
- insecure bool
-}
-
-func (u *fakeWsUpstream) From() string {
- return "/"
-}
-
-func (u *fakeWsUpstream) Select(r *http.Request) *UpstreamHost {
- uri, _ := url.Parse(u.name)
- host := &UpstreamHost{
- Name: u.name,
- ReverseProxy: NewSingleHostReverseProxy(uri, u.without, http.DefaultMaxIdleConnsPerHost),
- UpstreamHeaders: http.Header{
- "Connection": {"{>Connection}"},
- "Upgrade": {"{>Upgrade}"}},
- }
- if u.insecure {
- host.ReverseProxy.UseInsecureTransport()
- }
- return host
-}
-
-func (u *fakeWsUpstream) AllowedPath(requestPath string) bool { return true }
-func (u *fakeWsUpstream) GetTryDuration() time.Duration { return 1 * time.Second }
-func (u *fakeWsUpstream) GetTryInterval() time.Duration { return 250 * time.Millisecond }
-func (u *fakeWsUpstream) GetHostCount() int { return 1 }
-func (u *fakeWsUpstream) Stop() error { return nil }
-
-// recorderHijacker is a ResponseRecorder that can
-// be hijacked.
-type recorderHijacker struct {
- *httptest.ResponseRecorder
- fakeConn *fakeConn
-}
-
-func (rh *recorderHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return rh.fakeConn, nil, nil
-}
-
-type fakeConn struct {
- readBuf bytes.Buffer
- writeBuf bytes.Buffer
-}
-
-func (c *fakeConn) LocalAddr() net.Addr { return nil }
-func (c *fakeConn) RemoteAddr() net.Addr { return nil }
-func (c *fakeConn) SetDeadline(t time.Time) error { return nil }
-func (c *fakeConn) SetReadDeadline(t time.Time) error { return nil }
-func (c *fakeConn) SetWriteDeadline(t time.Time) error { return nil }
-func (c *fakeConn) Close() error { return nil }
-func (c *fakeConn) Read(b []byte) (int, error) { return c.readBuf.Read(b) }
-func (c *fakeConn) Write(b []byte) (int, error) { return c.writeBuf.Write(b) }
-
-// testResponseRecorder wraps `httptest.ResponseRecorder`,
-// also implements `http.CloseNotifier`, `http.Hijacker` and `http.Pusher`.
-type testResponseRecorder struct {
- *httpserver.ResponseWriterWrapper
-}
-
-func (testResponseRecorder) CloseNotify() <-chan bool { return nil }
-
-// Interface guards
-var _ httpserver.HTTPInterfaces = testResponseRecorder{}
-
-func BenchmarkProxy(b *testing.B) {
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Hello, client"))
- }))
- defer backend.Close()
-
- upstream := newFakeUpstream(backend.URL, false)
- upstream.host.UpstreamHeaders = http.Header{
- "Hostname": {"{hostname}"},
- "Host": {"{host}"},
- "X-Real-IP": {"{remote}"},
- "X-Forwarded-Proto": {"{scheme}"},
- }
- // set up proxy
- p := &Proxy{
- Next: httpserver.EmptyNext, // prevents panic in some cases when test fails
- Upstreams: []Upstream{upstream},
- }
-
- w := httptest.NewRecorder()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- // create request and response recorder
- r, err := http.NewRequest("GET", "/", nil)
- if err != nil {
- b.Fatalf("Failed to create request: %v", err)
- }
- b.StartTimer()
- p.ServeHTTP(w, r)
- }
-}
-
-func TestChunkedWebSocketReverseProxy(t *testing.T) {
- s := websocket.Server{
- Handler: websocket.Handler(func(ws *websocket.Conn) {
- for {
- select {}
- }
- }),
- }
- s.Config.Header = http.Header(make(map[string][]string))
- s.Config.Header.Set("Transfer-Encoding", "chunked")
-
- wsNop := httptest.NewServer(s)
- defer wsNop.Close()
-
- // Get proxy to use for the test
- p := newWebSocketTestProxy(wsNop.URL, false)
-
- // Create client request
- r := httptest.NewRequest("GET", "/", nil)
-
- r.Header = http.Header{
- "Connection": {"Upgrade"},
- "Upgrade": {"websocket"},
- "Origin": {wsNop.URL},
- "Sec-WebSocket-Key": {"x3JJHMbDL1EzLkh9GBhXDw=="},
- "Sec-WebSocket-Version": {"13"},
- }
-
- // Capture the request
- w := &recorderHijacker{httptest.NewRecorder(), new(fakeConn)}
-
- // Booya! Do the test.
- _, err := p.ServeHTTP(w, r)
-
- // Make sure the backend accepted the WS connection.
- // Mostly interested in the Upgrade and Connection response headers
- // and the 101 status code.
- expected := []byte("HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=\r\nTransfer-Encoding: chunked\r\n\r\n")
- actual := w.fakeConn.writeBuf.Bytes()
- if !bytes.Equal(actual, expected) {
- t.Errorf("Expected backend to accept response:\n'%s'\nActually got:\n'%s'", expected, actual)
- }
-
- if err != nil {
- t.Error(err)
- }
-}
diff --git a/caddyhttp/proxy/reverseproxy.go b/caddyhttp/proxy/reverseproxy.go
deleted file mode 100644
index 41687cc189e..00000000000
--- a/caddyhttp/proxy/reverseproxy.go
+++ /dev/null
@@ -1,656 +0,0 @@
-// This file is adapted from code in the net/http/httputil
-// package of the Go standard library, which is by the
-// Go Authors, and bears this copyright and license info:
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// This file has been modified from the standard lib to
-// meet the needs of the application.
-
-package proxy
-
-import (
- "crypto/tls"
- "io"
- "net"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-var (
- defaultDialer = &net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }
-
- bufferPool = sync.Pool{New: createBuffer}
-)
-
-func createBuffer() interface{} {
- return make([]byte, 0, 32*1024)
-}
-
-func pooledIoCopy(dst io.Writer, src io.Reader) {
- buf := bufferPool.Get().([]byte)
- defer bufferPool.Put(buf)
-
- // CopyBuffer only uses buf up to its length and panics if it's 0.
- // Due to that we extend buf's length to its capacity here and
- // ensure it's always non-zero.
- bufCap := cap(buf)
- io.CopyBuffer(dst, src, buf[0:bufCap:bufCap])
-}
-
-// onExitFlushLoop is a callback set by tests to detect the state of the
-// flushLoop() goroutine.
-var onExitFlushLoop func()
-
-// ReverseProxy is an HTTP Handler that takes an incoming request and
-// sends it to another server, proxying the response back to the
-// client.
-type ReverseProxy struct {
- // Director must be a function which modifies
- // the request into a new request to be sent
- // using Transport. Its response is then copied
- // back to the original client unmodified.
- Director func(*http.Request)
-
- // The transport used to perform proxy requests.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-
- // FlushInterval specifies the flush interval
- // to flush to the client while copying the
- // response body.
- // If zero, no periodic flushing is done.
- FlushInterval time.Duration
-}
-
-// Though the relevant directive prefix is just "unix:", url.Parse
-// will - assuming the regular URL scheme - add additional slashes
-// as if "unix" was a request protocol.
-// What we need is just the path, so if "unix:/var/run/www.socket"
-// was the proxy directive, the parsed hostName would be
-// "unix:///var/run/www.socket", hence the ambiguous trimming.
-func socketDial(hostName string) func(network, addr string) (conn net.Conn, err error) {
- return func(network, addr string) (conn net.Conn, err error) {
- return net.Dial("unix", hostName[len("unix://"):])
- }
-}
-
-func singleJoiningSlash(a, b string) string {
- aslash := strings.HasSuffix(a, "/")
- bslash := strings.HasPrefix(b, "/")
- switch {
- case aslash && bslash:
- return a + b[1:]
- case !aslash && !bslash && b != "":
- return a + "/" + b
- }
- return a + b
-}
-
-// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
-// URLs to the scheme, host, and base path provided in target. If the
-// target's path is "/base" and the incoming request was for "/dir",
-// the target request will be for /base/dir.
-// Without logic: target's path is "/", incoming is "/api/messages",
-// without is "/api", then the target request will be for /messages.
-func NewSingleHostReverseProxy(target *url.URL, without string, keepalive int) *ReverseProxy {
- targetQuery := target.RawQuery
- director := func(req *http.Request) {
- if target.Scheme == "unix" {
- // to make Dial work with unix URL,
- // scheme and host have to be faked
- req.URL.Scheme = "http"
- req.URL.Host = "socket"
- } else {
- req.URL.Scheme = target.Scheme
- req.URL.Host = target.Host
- }
-
- // remove the `without` prefix
- if without != "" {
- req.URL.Path = strings.TrimPrefix(req.URL.Path, without)
- if req.URL.Opaque != "" {
- req.URL.Opaque = strings.TrimPrefix(req.URL.Opaque, without)
- }
- if req.URL.RawPath != "" {
- req.URL.RawPath = strings.TrimPrefix(req.URL.RawPath, without)
- }
- }
-
- // prefer returns val if it isn't empty, otherwise def
- prefer := func(val, def string) string {
- if val != "" {
- return val
- }
- return def
- }
-
- // Make up the final URL by concatenating the request and target URL.
- //
- // If there is encoded part in request or target URL,
- // the final URL should also be in encoded format.
- // Here, we concatenate their encoded parts which are stored
- // in URL.Opaque and URL.RawPath, if it is empty use
- // URL.Path instead.
- if req.URL.Opaque != "" || target.Opaque != "" {
- req.URL.Opaque = singleJoiningSlash(
- prefer(target.Opaque, target.Path),
- prefer(req.URL.Opaque, req.URL.Path))
- }
- if req.URL.RawPath != "" || target.RawPath != "" {
- req.URL.RawPath = singleJoiningSlash(
- prefer(target.RawPath, target.Path),
- prefer(req.URL.RawPath, req.URL.Path))
- }
- req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
-
- // Trims the path of the socket from the URL path.
- // This is done because req.URL passed to your proxied service
- // will have the full path of the socket file prefixed to it.
- // Calling /test on a server that proxies requests to
- // unix:/var/run/www.socket will thus set the requested path
- // to /var/run/www.socket/test, rendering paths useless.
- if target.Scheme == "unix" {
- // See comment on socketDial for the trim
- socketPrefix := target.String()[len("unix://"):]
- req.URL.Path = strings.TrimPrefix(req.URL.Path, socketPrefix)
- if req.URL.Opaque != "" {
- req.URL.Opaque = strings.TrimPrefix(req.URL.Opaque, socketPrefix)
- }
- if req.URL.RawPath != "" {
- req.URL.RawPath = strings.TrimPrefix(req.URL.RawPath, socketPrefix)
- }
- }
-
- if targetQuery == "" || req.URL.RawQuery == "" {
- req.URL.RawQuery = targetQuery + req.URL.RawQuery
- } else {
- req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
- }
- }
- rp := &ReverseProxy{Director: director, FlushInterval: 250 * time.Millisecond} // flushing good for streaming & server-sent events
- if target.Scheme == "unix" {
- rp.Transport = &http.Transport{
- Dial: socketDial(target.String()),
- }
- } else if keepalive != http.DefaultMaxIdleConnsPerHost {
- // if keepalive is equal to the default,
- // just use default transport, to avoid creating
- // a brand new transport
- transport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: defaultDialer.Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }
- if keepalive == 0 {
- transport.DisableKeepAlives = true
- } else {
- transport.MaxIdleConnsPerHost = keepalive
- }
- if httpserver.HTTP2 {
- http2.ConfigureTransport(transport)
- }
- rp.Transport = transport
- }
- return rp
-}
-
-// UseInsecureTransport is used to facilitate HTTPS proxying
-// when it is OK for upstream to be using a bad certificate,
-// since this transport skips verification.
-func (rp *ReverseProxy) UseInsecureTransport() {
- if rp.Transport == nil {
- transport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: defaultDialer.Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }
- if httpserver.HTTP2 {
- http2.ConfigureTransport(transport)
- }
- rp.Transport = transport
- } else if transport, ok := rp.Transport.(*http.Transport); ok {
- if transport.TLSClientConfig == nil {
- transport.TLSClientConfig = &tls.Config{}
- }
- transport.TLSClientConfig.InsecureSkipVerify = true
- // No http2.ConfigureTransport() here.
- // For now this is only added in places where
- // an http.Transport is actually created.
- }
-}
-
-// ServeHTTP serves the proxied request to the upstream by performing a roundtrip.
-// It is designed to handle websocket connection upgrades as well.
-func (rp *ReverseProxy) ServeHTTP(rw http.ResponseWriter, outreq *http.Request, respUpdateFn respUpdateFn) error {
- transport := rp.Transport
- if requestIsWebsocket(outreq) {
- transport = newConnHijackerTransport(transport)
- } else if transport == nil {
- transport = http.DefaultTransport
- }
-
- rp.Director(outreq)
-
- res, err := transport.RoundTrip(outreq)
- if err != nil {
- return err
- }
-
- isWebsocket := res.StatusCode == http.StatusSwitchingProtocols && strings.ToLower(res.Header.Get("Upgrade")) == "websocket"
-
- // Remove hop-by-hop headers listed in the
- // "Connection" header of the response.
- if c := res.Header.Get("Connection"); c != "" {
- for _, f := range strings.Split(c, ",") {
- if f = strings.TrimSpace(f); f != "" {
- res.Header.Del(f)
- }
- }
- }
-
- for _, h := range hopHeaders {
- res.Header.Del(h)
- }
-
- if respUpdateFn != nil {
- respUpdateFn(res)
- }
-
- if isWebsocket {
- defer res.Body.Close()
- hj, ok := rw.(http.Hijacker)
- if !ok {
- panic(httpserver.NonHijackerError{Underlying: rw})
- }
-
- conn, brw, err := hj.Hijack()
- if err != nil {
- return err
- }
- defer conn.Close()
-
- var backendConn net.Conn
- if hj, ok := transport.(*connHijackerTransport); ok {
- backendConn = hj.Conn
- if _, err := conn.Write(hj.Replay); err != nil {
- return err
- }
- bufferPool.Put(hj.Replay)
- } else {
- backendConn, err = net.Dial("tcp", outreq.URL.Host)
- if err != nil {
- return err
- }
- outreq.Write(backendConn)
- }
- defer backendConn.Close()
-
- // Proxy backend -> frontend.
- go pooledIoCopy(conn, backendConn)
-
- // Proxy frontend -> backend.
- //
- // NOTE: Hijack() sometimes returns buffered up bytes in brw which
- // would be lost if we didn't read them out manually below.
- if brw != nil {
- if n := brw.Reader.Buffered(); n > 0 {
- rbuf, err := brw.Reader.Peek(n)
- if err != nil {
- return err
- }
- backendConn.Write(rbuf)
- }
- }
- pooledIoCopy(backendConn, conn)
- } else {
- // NOTE:
- // Closing the Body involves acquiring a mutex, which is a
- // unnecessarily heavy operation, considering that this defer will
- // pretty much never be executed with the Body still unclosed.
- bodyOpen := true
- closeBody := func() {
- if bodyOpen {
- res.Body.Close()
- bodyOpen = false
- }
- }
- defer closeBody()
-
- // Copy all headers over.
- // res.Header does not include the "Trailer" header,
- // which means we will have to do that manually below.
- copyHeader(rw.Header(), res.Header)
-
- // The "Trailer" header isn't included in res' Header map, which
- // is why we have to build one ourselves from res.Trailer.
- //
- // But res.Trailer does not necessarily contain all trailer keys at this
- // point yet. The HTTP spec allows one to send "unannounced trailers"
- // after a request and certain systems like gRPC make use of that.
- announcedTrailerKeyCount := len(res.Trailer)
- if announcedTrailerKeyCount > 0 {
- vv := make([]string, 0, announcedTrailerKeyCount)
- for k := range res.Trailer {
- vv = append(vv, k)
- }
- rw.Header()["Trailer"] = vv
- }
-
- // Now copy over the status code as well as the response body.
- rw.WriteHeader(res.StatusCode)
- if announcedTrailerKeyCount > 0 {
- // Force chunking if we saw a response trailer.
- // This prevents net/http from calculating the length
- // for short bodies and adding a Content-Length.
- if fl, ok := rw.(http.Flusher); ok {
- fl.Flush()
- }
- }
- rp.copyResponse(rw, res.Body)
-
- // Now close the body to fully populate res.Trailer.
- closeBody()
-
- // Since Go does not remove keys from res.Trailer we
- // can safely do a length comparison to check wether
- // we received further, unannounced trailers.
- //
- // Most of the time forceSetTrailers should be false.
- forceSetTrailers := len(res.Trailer) != announcedTrailerKeyCount
- shallowCopyTrailers(rw.Header(), res.Trailer, forceSetTrailers)
- }
-
- return nil
-}
-
-func (rp *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
- if rp.FlushInterval != 0 {
- if wf, ok := dst.(writeFlusher); ok {
- mlw := &maxLatencyWriter{
- dst: wf,
- latency: rp.FlushInterval,
- done: make(chan bool),
- }
- go mlw.flushLoop()
- defer mlw.stop()
- dst = mlw
- }
- }
- pooledIoCopy(dst, src)
-}
-
-// skip these headers if they already exist.
-// see https://github.com/mholt/caddy/pull/1112#discussion_r80092582
-var skipHeaders = map[string]struct{}{
- "Content-Type": {},
- "Content-Disposition": {},
- "Accept-Ranges": {},
- "Set-Cookie": {},
- "Cache-Control": {},
- "Expires": {},
-}
-
-func copyHeader(dst, src http.Header) {
- for k, vv := range src {
- if _, ok := dst[k]; ok {
- // skip some predefined headers
- // see https://github.com/mholt/caddy/issues/1086
- if _, shouldSkip := skipHeaders[k]; shouldSkip {
- continue
- }
- // otherwise, overwrite to avoid duplicated fields that can be
- // problematic (see issue #1086) -- however, allow duplicate
- // Server fields so we can see the reality of the proxying.
- if k != "Server" {
- dst.Del(k)
- }
- }
- for _, v := range vv {
- dst.Add(k, v)
- }
- }
-}
-
-// shallowCopyTrailers copies all headers from srcTrailer to dstHeader.
-//
-// If forceSetTrailers is set to true, the http.TrailerPrefix will be added to
-// all srcTrailer key names. Otherwise the Go stdlib will ignore all keys
-// which weren't listed in the Trailer map before submitting the Response.
-//
-// WARNING: Only a shallow copy will be created!
-func shallowCopyTrailers(dstHeader, srcTrailer http.Header, forceSetTrailers bool) {
- for k, vv := range srcTrailer {
- if forceSetTrailers {
- k = http.TrailerPrefix + k
- }
- dstHeader[k] = vv
- }
-}
-
-// Hop-by-hop headers. These are removed when sent to the backend.
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
-var hopHeaders = []string{
- "Alt-Svc",
- "Alternate-Protocol",
- "Connection",
- "Keep-Alive",
- "Proxy-Authenticate",
- "Proxy-Authorization",
- "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
- "Te", // canonicalized version of "TE"
- "Trailer", // not Trailers per URL above; http://www.rfc-editor.org/errata_search.php?eid=4522
- "Transfer-Encoding",
- "Upgrade",
-}
-
-type respUpdateFn func(resp *http.Response)
-
-type hijackedConn struct {
- net.Conn
- hj *connHijackerTransport
-}
-
-func (c *hijackedConn) Read(b []byte) (n int, err error) {
- n, err = c.Conn.Read(b)
- c.hj.Replay = append(c.hj.Replay, b[:n]...)
- return
-}
-
-func (c *hijackedConn) Close() error {
- return nil
-}
-
-type connHijackerTransport struct {
- *http.Transport
- Conn net.Conn
- Replay []byte
-}
-
-func newConnHijackerTransport(base http.RoundTripper) *connHijackerTransport {
- t := &http.Transport{
- MaxIdleConnsPerHost: -1,
- }
- if b, _ := base.(*http.Transport); b != nil {
- tlsClientConfig := b.TLSClientConfig
- if tlsClientConfig != nil && tlsClientConfig.NextProtos != nil {
- tlsClientConfig = tlsClientConfig.Clone()
- tlsClientConfig.NextProtos = nil
- }
-
- t.Proxy = b.Proxy
- t.TLSClientConfig = tlsClientConfig
- t.TLSHandshakeTimeout = b.TLSHandshakeTimeout
- t.Dial = b.Dial
- t.DialTLS = b.DialTLS
- } else {
- t.Proxy = http.ProxyFromEnvironment
- t.TLSHandshakeTimeout = 10 * time.Second
- }
- hj := &connHijackerTransport{t, nil, bufferPool.Get().([]byte)[:0]}
-
- dial := getTransportDial(t)
- dialTLS := getTransportDialTLS(t)
- t.Dial = func(network, addr string) (net.Conn, error) {
- c, err := dial(network, addr)
- hj.Conn = c
- return &hijackedConn{c, hj}, err
- }
- t.DialTLS = func(network, addr string) (net.Conn, error) {
- c, err := dialTLS(network, addr)
- hj.Conn = c
- return &hijackedConn{c, hj}, err
- }
-
- return hj
-}
-
-// getTransportDial always returns a plain Dialer
-// and defaults to the existing t.Dial.
-func getTransportDial(t *http.Transport) func(network, addr string) (net.Conn, error) {
- if t.Dial != nil {
- return t.Dial
- }
- return defaultDialer.Dial
-}
-
-// getTransportDial always returns a TLS Dialer
-// and defaults to the existing t.DialTLS.
-func getTransportDialTLS(t *http.Transport) func(network, addr string) (net.Conn, error) {
- if t.DialTLS != nil {
- return t.DialTLS
- }
-
- // newConnHijackerTransport will modify t.Dial after calling this method
- // => Create a backup reference.
- plainDial := getTransportDial(t)
-
- // The following DialTLS implementation stems from the Go stdlib and
- // is identical to what happens if DialTLS is not provided.
- // Source: https://github.com/golang/go/blob/230a376b5a67f0e9341e1fa47e670ff762213c83/src/net/http/transport.go#L1018-L1051
- return func(network, addr string) (net.Conn, error) {
- plainConn, err := plainDial(network, addr)
- if err != nil {
- return nil, err
- }
-
- tlsClientConfig := t.TLSClientConfig
- if tlsClientConfig == nil {
- tlsClientConfig = &tls.Config{}
- }
- if !tlsClientConfig.InsecureSkipVerify && tlsClientConfig.ServerName == "" {
- tlsClientConfig.ServerName = stripPort(addr)
- }
-
- tlsConn := tls.Client(plainConn, tlsClientConfig)
- errc := make(chan error, 2)
- var timer *time.Timer
- if d := t.TLSHandshakeTimeout; d != 0 {
- timer = time.AfterFunc(d, func() {
- errc <- tlsHandshakeTimeoutError{}
- })
- }
- go func() {
- err := tlsConn.Handshake()
- if timer != nil {
- timer.Stop()
- }
- errc <- err
- }()
- if err := <-errc; err != nil {
- plainConn.Close()
- return nil, err
- }
- if !tlsClientConfig.InsecureSkipVerify {
- hostname := tlsClientConfig.ServerName
- if hostname == "" {
- hostname = stripPort(addr)
- }
- if err := tlsConn.VerifyHostname(hostname); err != nil {
- plainConn.Close()
- return nil, err
- }
- }
-
- return tlsConn, nil
- }
-}
-
-// stripPort returns address without its port if it has one and
-// works with IP addresses as well as hostnames formatted as host:port.
-//
-// IPv6 addresses (excluding the port) must be enclosed in
-// square brackets similar to the requirements of Go's stdlib.
-func stripPort(address string) string {
- // Keep in mind that the address might be a IPv6 address
- // and thus contain a colon, but not have a port.
- portIdx := strings.LastIndex(address, ":")
- ipv6Idx := strings.LastIndex(address, "]")
- if portIdx > ipv6Idx {
- address = address[:portIdx]
- }
- return address
-}
-
-type tlsHandshakeTimeoutError struct{}
-
-func (tlsHandshakeTimeoutError) Timeout() bool { return true }
-func (tlsHandshakeTimeoutError) Temporary() bool { return true }
-func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
-
-func requestIsWebsocket(req *http.Request) bool {
- return strings.ToLower(req.Header.Get("Upgrade")) == "websocket" && strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade")
-}
-
-type writeFlusher interface {
- io.Writer
- http.Flusher
-}
-
-type maxLatencyWriter struct {
- dst writeFlusher
- latency time.Duration
-
- lk sync.Mutex // protects Write + Flush
- done chan bool
-}
-
-func (m *maxLatencyWriter) Write(p []byte) (int, error) {
- m.lk.Lock()
- defer m.lk.Unlock()
- return m.dst.Write(p)
-}
-
-func (m *maxLatencyWriter) flushLoop() {
- t := time.NewTicker(m.latency)
- defer t.Stop()
- for {
- select {
- case <-m.done:
- if onExitFlushLoop != nil {
- onExitFlushLoop()
- }
- return
- case <-t.C:
- m.lk.Lock()
- m.dst.Flush()
- m.lk.Unlock()
- }
- }
-}
-
-func (m *maxLatencyWriter) stop() { m.done <- true }
diff --git a/caddyhttp/proxy/setup.go b/caddyhttp/proxy/setup.go
deleted file mode 100644
index e96b3f595fc..00000000000
--- a/caddyhttp/proxy/setup.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package proxy
-
-import (
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("proxy", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Proxy middleware instance.
-func setup(c *caddy.Controller) error {
- upstreams, err := NewStaticUpstreams(c.Dispenser, httpserver.GetConfig(c).Host())
- if err != nil {
- return err
- }
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Proxy{Next: next, Upstreams: upstreams}
- })
-
- // Register shutdown handlers.
- for _, upstream := range upstreams {
- c.OnShutdown(upstream.Stop)
- }
-
- return nil
-}
diff --git a/caddyhttp/proxy/setup_test.go b/caddyhttp/proxy/setup_test.go
deleted file mode 100644
index 02809058fd8..00000000000
--- a/caddyhttp/proxy/setup_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package proxy
-
-import (
- "reflect"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- for i, test := range []struct {
- input string
- shouldErr bool
- expectedHosts map[string]struct{}
- }{
- // test #0 test usual to destination still works normally
- {
- "proxy / localhost:80",
- false,
- map[string]struct{}{
- "http://localhost:80": {},
- },
- },
-
- // test #1 test usual to destination with port range
- {
- "proxy / localhost:8080-8082",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- "http://localhost:8081": {},
- "http://localhost:8082": {},
- },
- },
-
- // test #2 test upstream directive
- {
- "proxy / {\n upstream localhost:8080\n}",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- },
- },
-
- // test #3 test upstream directive with port range
- {
- "proxy / {\n upstream localhost:8080-8081\n}",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- "http://localhost:8081": {},
- },
- },
-
- // test #4 test to destination with upstream directive
- {
- "proxy / localhost:8080 {\n upstream localhost:8081-8082\n}",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- "http://localhost:8081": {},
- "http://localhost:8082": {},
- },
- },
-
- // test #5 test with unix sockets
- {
- "proxy / localhost:8080 {\n upstream unix:/var/foo\n}",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- "unix:/var/foo": {},
- },
- },
-
- // test #6 test fail on malformed port range
- {
- "proxy / localhost:8090-8080",
- true,
- nil,
- },
-
- // test #7 test fail on malformed port range 2
- {
- "proxy / {\n upstream localhost:80-A\n}",
- true,
- nil,
- },
-
- // test #8 test upstreams without ports work correctly
- {
- "proxy / http://localhost {\n upstream testendpoint\n}",
- false,
- map[string]struct{}{
- "http://localhost": {},
- "http://testendpoint": {},
- },
- },
-
- // test #9 test several upstream directives
- {
- "proxy / localhost:8080 {\n upstream localhost:8081-8082\n upstream localhost:8083-8085\n}",
- false,
- map[string]struct{}{
- "http://localhost:8080": {},
- "http://localhost:8081": {},
- "http://localhost:8082": {},
- "http://localhost:8083": {},
- "http://localhost:8084": {},
- "http://localhost:8085": {},
- },
- },
- // test #10 test hyphen without port range
- {
- "proxy / http://localhost:8001/a--b",
- false,
- map[string]struct{}{
- "http://localhost:8001/a--b": {},
- },
- },
- // test #11 test hyphen with port range
- {
- "proxy / http://localhost:8001-8005/a--b",
- false,
- map[string]struct{}{
- "http://localhost:8001/a--b": {},
- "http://localhost:8002/a--b": {},
- "http://localhost:8003/a--b": {},
- "http://localhost:8004/a--b": {},
- "http://localhost:8005/a--b": {},
- },
- },
- // test #12 test value is optional when remove upstream header
- {
- "proxy / localhost:1984 {\n header_upstream -server \n}",
- false,
- map[string]struct{}{
- "http://localhost:1984": {},
- },
- },
- // test #13 test value is optional when remove downstream header
- {
- "proxy / localhost:1984 {\n header_downstream -server \n}",
- false,
- map[string]struct{}{
- "http://localhost:1984": {},
- },
- },
- } {
- c := caddy.NewTestController("http", test.input)
- err := setup(c)
- if err != nil && !test.shouldErr {
- t.Errorf("Test case #%d received an error of %v", i, err)
- } else if test.shouldErr {
- continue
- }
-
- mids := httpserver.GetConfig(c).Middleware()
- mid := mids[len(mids)-1]
-
- upstreams := mid(nil).(Proxy).Upstreams
- for _, upstream := range upstreams {
- val := reflect.ValueOf(upstream).Elem()
- hosts := val.FieldByName("Hosts").Interface().(HostPool)
- if len(hosts) != len(test.expectedHosts) {
- t.Errorf("Test case #%d expected %d hosts but received %d", i, len(test.expectedHosts), len(hosts))
- } else {
- for _, host := range hosts {
- if _, found := test.expectedHosts[host.Name]; !found {
- t.Errorf("Test case #%d has an unexpected host %s", i, host.Name)
- }
- }
- }
- }
- }
-}
diff --git a/caddyhttp/proxy/upstream.go b/caddyhttp/proxy/upstream.go
deleted file mode 100644
index e7cc392b111..00000000000
--- a/caddyhttp/proxy/upstream.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package proxy
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "path"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "crypto/tls"
-
- "github.com/mholt/caddy/caddyfile"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-var (
- supportedPolicies = make(map[string]func(string) Policy)
-)
-
-type staticUpstream struct {
- from string
- upstreamHeaders http.Header
- downstreamHeaders http.Header
- stop chan struct{} // Signals running goroutines to stop.
- wg sync.WaitGroup // Used to wait for running goroutines to stop.
- Hosts HostPool
- Policy Policy
- KeepAlive int
- FailTimeout time.Duration
- TryDuration time.Duration
- TryInterval time.Duration
- MaxConns int64
- HealthCheck struct {
- Client http.Client
- Path string
- Interval time.Duration
- Timeout time.Duration
- Host string
- Port string
- ContentString string
- }
- WithoutPathPrefix string
- IgnoredSubPaths []string
- insecureSkipVerify bool
- MaxFails int32
-}
-
-// NewStaticUpstreams parses the configuration input and sets up
-// static upstreams for the proxy middleware. The host string parameter,
-// if not empty, is used for setting the upstream Host header for the
-// health checks if the upstream header config requires it.
-func NewStaticUpstreams(c caddyfile.Dispenser, host string) ([]Upstream, error) {
- var upstreams []Upstream
- for c.Next() {
-
- upstream := &staticUpstream{
- from: "",
- stop: make(chan struct{}),
- upstreamHeaders: make(http.Header),
- downstreamHeaders: make(http.Header),
- Hosts: nil,
- Policy: &Random{},
- MaxFails: 1,
- TryInterval: 250 * time.Millisecond,
- MaxConns: 0,
- KeepAlive: http.DefaultMaxIdleConnsPerHost,
- }
-
- if !c.Args(&upstream.from) {
- return upstreams, c.ArgErr()
- }
-
- var to []string
- for _, t := range c.RemainingArgs() {
- parsed, err := parseUpstream(t)
- if err != nil {
- return upstreams, err
- }
- to = append(to, parsed...)
- }
-
- for c.NextBlock() {
- switch c.Val() {
- case "upstream":
- if !c.NextArg() {
- return upstreams, c.ArgErr()
- }
- parsed, err := parseUpstream(c.Val())
- if err != nil {
- return upstreams, err
- }
- to = append(to, parsed...)
- default:
- if err := parseBlock(&c, upstream); err != nil {
- return upstreams, err
- }
- }
- }
-
- if len(to) == 0 {
- return upstreams, c.ArgErr()
- }
-
- upstream.Hosts = make([]*UpstreamHost, len(to))
- for i, host := range to {
- uh, err := upstream.NewHost(host)
- if err != nil {
- return upstreams, err
- }
- upstream.Hosts[i] = uh
- }
-
- if upstream.HealthCheck.Path != "" {
- upstream.HealthCheck.Client = http.Client{
- Timeout: upstream.HealthCheck.Timeout,
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: upstream.insecureSkipVerify},
- },
- }
-
- // set up health check upstream host if we have one
- if host != "" {
- hostHeader := upstream.upstreamHeaders.Get("Host")
- if strings.Contains(hostHeader, "{host}") {
- upstream.HealthCheck.Host = strings.Replace(hostHeader, "{host}", host, -1)
- }
- }
- upstream.wg.Add(1)
- go func() {
- defer upstream.wg.Done()
- upstream.HealthCheckWorker(upstream.stop)
- }()
- }
- upstreams = append(upstreams, upstream)
- }
- return upstreams, nil
-}
-
-func (u *staticUpstream) From() string {
- return u.from
-}
-
-func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
- if !strings.HasPrefix(host, "http") &&
- !strings.HasPrefix(host, "unix:") {
- host = "http://" + host
- }
- uh := &UpstreamHost{
- Name: host,
- Conns: 0,
- Fails: 0,
- FailTimeout: u.FailTimeout,
- Unhealthy: 0,
- UpstreamHeaders: u.upstreamHeaders,
- DownstreamHeaders: u.downstreamHeaders,
- CheckDown: func(u *staticUpstream) UpstreamHostDownFunc {
- return func(uh *UpstreamHost) bool {
- if atomic.LoadInt32(&uh.Unhealthy) != 0 {
- return true
- }
- if atomic.LoadInt32(&uh.Fails) >= u.MaxFails {
- return true
- }
- return false
- }
- }(u),
- WithoutPathPrefix: u.WithoutPathPrefix,
- MaxConns: u.MaxConns,
- }
-
- baseURL, err := url.Parse(uh.Name)
- if err != nil {
- return nil, err
- }
-
- uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix, u.KeepAlive)
- if u.insecureSkipVerify {
- uh.ReverseProxy.UseInsecureTransport()
- }
-
- return uh, nil
-}
-
-func parseUpstream(u string) ([]string, error) {
- if !strings.HasPrefix(u, "unix:") {
- colonIdx := strings.LastIndex(u, ":")
- protoIdx := strings.Index(u, "://")
-
- if colonIdx != -1 && colonIdx != protoIdx {
- us := u[:colonIdx]
- ue := ""
- portsEnd := len(u)
- if nextSlash := strings.Index(u[colonIdx:], "/"); nextSlash != -1 {
- portsEnd = colonIdx + nextSlash
- ue = u[portsEnd:]
- }
- ports := u[len(us)+1 : portsEnd]
-
- if separators := strings.Count(ports, "-"); separators == 1 {
- portsStr := strings.Split(ports, "-")
- pIni, err := strconv.Atoi(portsStr[0])
- if err != nil {
- return nil, err
- }
-
- pEnd, err := strconv.Atoi(portsStr[1])
- if err != nil {
- return nil, err
- }
-
- if pEnd <= pIni {
- return nil, fmt.Errorf("port range [%s] is invalid", ports)
- }
-
- hosts := []string{}
- for p := pIni; p <= pEnd; p++ {
- hosts = append(hosts, fmt.Sprintf("%s:%d%s", us, p, ue))
- }
- return hosts, nil
- }
- }
- }
-
- return []string{u}, nil
-
-}
-
-func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error {
- switch c.Val() {
- case "policy":
- if !c.NextArg() {
- return c.ArgErr()
- }
- policyCreateFunc, ok := supportedPolicies[c.Val()]
- if !ok {
- return c.ArgErr()
- }
- arg := ""
- if c.NextArg() {
- arg = c.Val()
- }
- u.Policy = policyCreateFunc(arg)
- case "fail_timeout":
- if !c.NextArg() {
- return c.ArgErr()
- }
- dur, err := time.ParseDuration(c.Val())
- if err != nil {
- return err
- }
- u.FailTimeout = dur
- case "max_fails":
- if !c.NextArg() {
- return c.ArgErr()
- }
- n, err := strconv.Atoi(c.Val())
- if err != nil {
- return err
- }
- if n < 1 {
- return c.Err("max_fails must be at least 1")
- }
- u.MaxFails = int32(n)
- case "try_duration":
- if !c.NextArg() {
- return c.ArgErr()
- }
- dur, err := time.ParseDuration(c.Val())
- if err != nil {
- return err
- }
- u.TryDuration = dur
- case "try_interval":
- if !c.NextArg() {
- return c.ArgErr()
- }
- interval, err := time.ParseDuration(c.Val())
- if err != nil {
- return err
- }
- u.TryInterval = interval
- case "max_conns":
- if !c.NextArg() {
- return c.ArgErr()
- }
- n, err := strconv.ParseInt(c.Val(), 10, 64)
- if err != nil {
- return err
- }
- u.MaxConns = n
- case "health_check":
- if !c.NextArg() {
- return c.ArgErr()
- }
- u.HealthCheck.Path = c.Val()
-
- // Set defaults
- if u.HealthCheck.Interval == 0 {
- u.HealthCheck.Interval = 30 * time.Second
- }
- if u.HealthCheck.Timeout == 0 {
- u.HealthCheck.Timeout = 60 * time.Second
- }
- case "health_check_interval":
- var interval string
- if !c.Args(&interval) {
- return c.ArgErr()
- }
- dur, err := time.ParseDuration(interval)
- if err != nil {
- return err
- }
- u.HealthCheck.Interval = dur
- case "health_check_timeout":
- var interval string
- if !c.Args(&interval) {
- return c.ArgErr()
- }
- dur, err := time.ParseDuration(interval)
- if err != nil {
- return err
- }
- u.HealthCheck.Timeout = dur
- case "health_check_port":
- if !c.NextArg() {
- return c.ArgErr()
- }
- port := c.Val()
- n, err := strconv.Atoi(port)
- if err != nil {
- return err
- }
-
- if n < 0 {
- return c.Errf("invalid health_check_port '%s'", port)
- }
- u.HealthCheck.Port = port
- case "health_check_contains":
- if !c.NextArg() {
- return c.ArgErr()
- }
- u.HealthCheck.ContentString = c.Val()
- case "header_upstream":
- var header, value string
- if !c.Args(&header, &value) {
- // When removing a header, the value can be optional.
- if !strings.HasPrefix(header, "-") {
- return c.ArgErr()
- }
- }
- u.upstreamHeaders.Add(header, value)
- case "header_downstream":
- var header, value string
- if !c.Args(&header, &value) {
- // When removing a header, the value can be optional.
- if !strings.HasPrefix(header, "-") {
- return c.ArgErr()
- }
- }
- u.downstreamHeaders.Add(header, value)
- case "transparent":
- u.upstreamHeaders.Add("Host", "{host}")
- u.upstreamHeaders.Add("X-Real-IP", "{remote}")
- u.upstreamHeaders.Add("X-Forwarded-For", "{remote}")
- u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}")
- case "websocket":
- u.upstreamHeaders.Add("Connection", "{>Connection}")
- u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
- case "without":
- if !c.NextArg() {
- return c.ArgErr()
- }
- u.WithoutPathPrefix = c.Val()
- case "except":
- ignoredPaths := c.RemainingArgs()
- if len(ignoredPaths) == 0 {
- return c.ArgErr()
- }
- u.IgnoredSubPaths = ignoredPaths
- case "insecure_skip_verify":
- u.insecureSkipVerify = true
- case "keepalive":
- if !c.NextArg() {
- return c.ArgErr()
- }
- n, err := strconv.Atoi(c.Val())
- if err != nil {
- return err
- }
- if n < 0 {
- return c.ArgErr()
- }
- u.KeepAlive = n
- default:
- return c.Errf("unknown property '%s'", c.Val())
- }
- return nil
-}
-
-func (u *staticUpstream) healthCheck() {
- for _, host := range u.Hosts {
- hostURL := host.Name
- if u.HealthCheck.Port != "" {
- hostURL = replacePort(host.Name, u.HealthCheck.Port)
- }
- hostURL += u.HealthCheck.Path
-
- unhealthy := func() bool {
- // set up request, needed to be able to modify headers
- // possible errors are bad HTTP methods or un-parsable urls
- req, err := http.NewRequest("GET", hostURL, nil)
- if err != nil {
- return true
- }
- // set host for request going upstream
- if u.HealthCheck.Host != "" {
- req.Host = u.HealthCheck.Host
- }
- r, err := u.HealthCheck.Client.Do(req)
- if err != nil {
- return true
- }
- defer func() {
- io.Copy(ioutil.Discard, r.Body)
- r.Body.Close()
- }()
- if r.StatusCode < 200 || r.StatusCode >= 400 {
- return true
- }
- if u.HealthCheck.ContentString == "" { // don't check for content string
- return false
- }
- // TODO ReadAll will be replaced if deemed necessary
- // See https://github.com/mholt/caddy/pull/1691
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return true
- }
- if bytes.Contains(buf, []byte(u.HealthCheck.ContentString)) {
- return false
- }
- return true
- }()
- if unhealthy {
- atomic.StoreInt32(&host.Unhealthy, 1)
- } else {
- atomic.StoreInt32(&host.Unhealthy, 0)
- }
- }
-}
-
-func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) {
- ticker := time.NewTicker(u.HealthCheck.Interval)
- u.healthCheck()
- for {
- select {
- case <-ticker.C:
- u.healthCheck()
- case <-stop:
- ticker.Stop()
- return
- }
- }
-}
-
-func (u *staticUpstream) Select(r *http.Request) *UpstreamHost {
- pool := u.Hosts
- if len(pool) == 1 {
- if !pool[0].Available() {
- return nil
- }
- return pool[0]
- }
- allUnavailable := true
- for _, host := range pool {
- if host.Available() {
- allUnavailable = false
- break
- }
- }
- if allUnavailable {
- return nil
- }
- if u.Policy == nil {
- return (&Random{}).Select(pool, r)
- }
- return u.Policy.Select(pool, r)
-}
-
-func (u *staticUpstream) AllowedPath(requestPath string) bool {
- for _, ignoredSubPath := range u.IgnoredSubPaths {
- if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(u.From(), ignoredSubPath)) {
- return false
- }
- }
- return true
-}
-
-// GetTryDuration returns u.TryDuration.
-func (u *staticUpstream) GetTryDuration() time.Duration {
- return u.TryDuration
-}
-
-// GetTryInterval returns u.TryInterval.
-func (u *staticUpstream) GetTryInterval() time.Duration {
- return u.TryInterval
-}
-
-func (u *staticUpstream) GetHostCount() int {
- return len(u.Hosts)
-}
-
-// Stop sends a signal to all goroutines started by this staticUpstream to exit
-// and waits for them to finish before returning.
-func (u *staticUpstream) Stop() error {
- close(u.stop)
- u.wg.Wait()
- return nil
-}
-
-// RegisterPolicy adds a custom policy to the proxy.
-func RegisterPolicy(name string, policy func(string) Policy) {
- supportedPolicies[name] = policy
-}
-
-func replacePort(originalURL string, newPort string) string {
- parsedURL, err := url.Parse(originalURL)
- if err != nil {
- return originalURL
- }
-
- // handles 'localhost' and 'localhost:8080'
- parsedHost, _, err := net.SplitHostPort(parsedURL.Host)
- if err != nil {
- parsedHost = parsedURL.Host
- }
-
- parsedURL.Host = net.JoinHostPort(parsedHost, newPort)
- return parsedURL.String()
-}
diff --git a/caddyhttp/proxy/upstream_test.go b/caddyhttp/proxy/upstream_test.go
deleted file mode 100644
index 8d1ef7198f0..00000000000
--- a/caddyhttp/proxy/upstream_test.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package proxy
-
-import (
- "fmt"
- "net"
- "net/http"
- "net/http/httptest"
- "strings"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/mholt/caddy/caddyfile"
-)
-
-func TestNewHost(t *testing.T) {
- upstream := &staticUpstream{
- FailTimeout: 10 * time.Second,
- MaxConns: 1,
- MaxFails: 1,
- }
-
- uh, err := upstream.NewHost("example.com")
- if err != nil {
- t.Error("Expected no error")
- }
- if uh.Name != "http://example.com" {
- t.Error("Expected default schema to be added to Name.")
- }
- if uh.FailTimeout != upstream.FailTimeout {
- t.Error("Expected default FailTimeout to be set.")
- }
- if uh.MaxConns != upstream.MaxConns {
- t.Error("Expected default MaxConns to be set.")
- }
- if uh.CheckDown == nil {
- t.Error("Expected default CheckDown to be set.")
- }
- if uh.CheckDown(uh) {
- t.Error("Expected new host not to be down.")
- }
- // mark Unhealthy
- uh.Unhealthy = 1
- if !uh.CheckDown(uh) {
- t.Error("Expected unhealthy host to be down.")
- }
- // mark with Fails
- uh.Unhealthy = 0
- uh.Fails = 1
- if !uh.CheckDown(uh) {
- t.Error("Expected failed host to be down.")
- }
-}
-
-func TestHealthCheck(t *testing.T) {
- upstream := &staticUpstream{
- from: "",
- Hosts: testPool(),
- Policy: &Random{},
- FailTimeout: 10 * time.Second,
- MaxFails: 1,
- }
- upstream.healthCheck()
- if upstream.Hosts[0].Down() {
- t.Error("Expected first host in testpool to not fail healthcheck.")
- }
- if !upstream.Hosts[1].Down() {
- t.Error("Expected second host in testpool to fail healthcheck.")
- }
-}
-
-func TestSelect(t *testing.T) {
- upstream := &staticUpstream{
- from: "",
- Hosts: testPool()[:3],
- Policy: &Random{},
- FailTimeout: 10 * time.Second,
- MaxFails: 1,
- }
- r, _ := http.NewRequest("GET", "/", nil)
- upstream.Hosts[0].Unhealthy = 1
- upstream.Hosts[1].Unhealthy = 1
- upstream.Hosts[2].Unhealthy = 1
- if h := upstream.Select(r); h != nil {
- t.Error("Expected select to return nil as all host are down")
- }
- upstream.Hosts[2].Unhealthy = 0
- if h := upstream.Select(r); h == nil {
- t.Error("Expected select to not return nil")
- }
- upstream.Hosts[0].Conns = 1
- upstream.Hosts[0].MaxConns = 1
- upstream.Hosts[1].Conns = 1
- upstream.Hosts[1].MaxConns = 1
- upstream.Hosts[2].Conns = 1
- upstream.Hosts[2].MaxConns = 1
- if h := upstream.Select(r); h != nil {
- t.Error("Expected select to return nil as all hosts are full")
- }
- upstream.Hosts[2].Conns = 0
- if h := upstream.Select(r); h == nil {
- t.Error("Expected select to not return nil")
- }
-}
-
-func TestRegisterPolicy(t *testing.T) {
- name := "custom"
- customPolicy := &customPolicy{}
- RegisterPolicy(name, func(string) Policy { return customPolicy })
- if _, ok := supportedPolicies[name]; !ok {
- t.Error("Expected supportedPolicies to have a custom policy.")
- }
-
-}
-
-func TestAllowedPaths(t *testing.T) {
- upstream := &staticUpstream{
- from: "/proxy",
- IgnoredSubPaths: []string{"/download", "/static"},
- }
- tests := []struct {
- url string
- expected bool
- }{
- {"/proxy", true},
- {"/proxy/dl", true},
- {"/proxy/download", false},
- {"/proxy/download/static", false},
- {"/proxy/static", false},
- {"/proxy/static/download", false},
- {"/proxy/something/download", true},
- {"/proxy/something/static", true},
- {"/proxy//static", false},
- {"/proxy//static//download", false},
- {"/proxy//download", false},
- }
-
- for i, test := range tests {
- allowed := upstream.AllowedPath(test.url)
- if test.expected != allowed {
- t.Errorf("Test %d: expected %v found %v", i+1, test.expected, allowed)
- }
- }
-}
-
-func TestParseBlockHealthCheck(t *testing.T) {
- tests := []struct {
- config string
- interval string
- timeout string
- }{
- // Test #1: Both options set correct time
- {"health_check /health\n health_check_interval 10s\n health_check_timeout 20s", "10s", "20s"},
-
- // Test #2: Health check options flipped around. Making sure health_check doesn't overwrite it
- {"health_check_interval 10s\n health_check_timeout 20s\n health_check /health", "10s", "20s"},
-
- // Test #3: No health_check options. So default.
- {"health_check /health", "30s", "1m0s"},
-
- // Test #4: Interval sets it to 15s and timeout defaults
- {"health_check /health\n health_check_interval 15s", "15s", "1m0s"},
-
- // Test #5: Timeout sets it to 15s and interval defaults
- {"health_check /health\n health_check_timeout 15s", "30s", "15s"},
-
- // Test #6: Some funky spelling to make sure it still defaults
- {"health_check /health health_check_time 15s", "30s", "1m0s"},
- }
-
- for i, test := range tests {
- u := staticUpstream{}
- c := caddyfile.NewDispenser("Testfile", strings.NewReader(test.config))
- for c.Next() {
- parseBlock(&c, &u)
- }
- if u.HealthCheck.Interval.String() != test.interval {
- t.Errorf(
- "Test %d: HealthCheck interval not the same from config. Got %v. Expected: %v",
- i+1,
- u.HealthCheck.Interval,
- test.interval,
- )
- }
- if u.HealthCheck.Timeout.String() != test.timeout {
- t.Errorf(
- "Test %d: HealthCheck timeout not the same from config. Got %v. Expected: %v",
- i+1,
- u.HealthCheck.Timeout,
- test.timeout,
- )
- }
- }
-}
-
-func TestStop(t *testing.T) {
- config := "proxy / %s {\n health_check /healthcheck \nhealth_check_interval %dms \n}"
- tests := []struct {
- name string
- intervalInMilliseconds int
- numHealthcheckIntervals int
- }{
- {
- "No Healthchecks After Stop - 5ms, 1 intervals",
- 5,
- 1,
- },
- {
- "No Healthchecks After Stop - 5ms, 2 intervals",
- 5,
- 2,
- },
- {
- "No Healthchecks After Stop - 5ms, 3 intervals",
- 5,
- 3,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
-
- // Set up proxy.
- var counter int64
- backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- r.Body.Close()
- atomic.AddInt64(&counter, 1)
- }))
-
- defer backend.Close()
-
- upstreams, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(fmt.Sprintf(config, backend.URL, test.intervalInMilliseconds))), "")
- if err != nil {
- t.Error("Expected no error. Got:", err.Error())
- }
-
- // Give some time for healthchecks to hit the server.
- time.Sleep(time.Duration(test.intervalInMilliseconds*test.numHealthcheckIntervals) * time.Millisecond)
-
- for _, upstream := range upstreams {
- if err := upstream.Stop(); err != nil {
- t.Error("Expected no error stopping upstream. Got: ", err.Error())
- }
- }
-
- counterValueAfterShutdown := atomic.LoadInt64(&counter)
-
- // Give some time to see if healthchecks are still hitting the server.
- time.Sleep(time.Duration(test.intervalInMilliseconds*test.numHealthcheckIntervals) * time.Millisecond)
-
- if counterValueAfterShutdown == 0 {
- t.Error("Expected healthchecks to hit test server. Got no healthchecks.")
- }
-
- counterValueAfterWaiting := atomic.LoadInt64(&counter)
- if counterValueAfterWaiting != counterValueAfterShutdown {
- t.Errorf("Expected no more healthchecks after shutdown. Got: %d healthchecks after shutdown", counterValueAfterWaiting-counterValueAfterShutdown)
- }
-
- })
-
- }
-}
-
-func TestParseBlock(t *testing.T) {
- r, _ := http.NewRequest("GET", "/", nil)
- tests := []struct {
- config string
- }{
- // Test #1: transparent preset
- {"proxy / localhost:8080 {\n transparent \n}"},
-
- // Test #2: transparent preset with another param
- {"proxy / localhost:8080 {\n transparent \nheader_upstream X-Test Tester \n}"},
-
- // Test #3: transparent preset on multiple sites
- {"proxy / localhost:8080 {\n transparent \n} \nproxy /api localhost:8081 { \ntransparent \n}"},
- }
-
- for i, test := range tests {
- upstreams, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), "")
- if err != nil {
- t.Errorf("Expected no error. Got: %s", err.Error())
- }
- for _, upstream := range upstreams {
- headers := upstream.Select(r).UpstreamHeaders
-
- if _, ok := headers["Host"]; !ok {
- t.Errorf("Test %d: Could not find the Host header", i+1)
- }
-
- if _, ok := headers["X-Real-Ip"]; !ok {
- t.Errorf("Test %d: Could not find the X-Real-Ip header", i+1)
- }
-
- if _, ok := headers["X-Forwarded-Proto"]; !ok {
- t.Errorf("Test %d: Could not find the X-Forwarded-Proto header", i+1)
- }
- }
- }
-}
-
-func TestHealthSetUp(t *testing.T) {
- // tests for insecure skip verify
- tests := []struct {
- config string
- flag bool
- }{
- // Test #1: without flag
- {"proxy / localhost:8080 {\n health_check / \n}", false},
-
- // Test #2: with flag
- {"proxy / localhost:8080 {\n health_check / \n insecure_skip_verify \n}", true},
- }
-
- for i, test := range tests {
- upstreams, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), "")
- if err != nil {
- t.Errorf("Expected no error. Got: %s", err.Error())
- }
- for _, upstream := range upstreams {
- staticUpstream, ok := upstream.(*staticUpstream)
- if !ok {
- t.Errorf("Type mismatch: %#v", upstream)
- continue
- }
- transport, ok := staticUpstream.HealthCheck.Client.Transport.(*http.Transport)
- if !ok {
- t.Errorf("Type mismatch: %#v", staticUpstream.HealthCheck.Client.Transport)
- continue
- }
- if test.flag != transport.TLSClientConfig.InsecureSkipVerify {
- t.Errorf("Test %d: expected transport.TLSClientCnfig.InsecureSkipVerify=%v, got %v", i, test.flag, transport.TLSClientConfig.InsecureSkipVerify)
- }
- }
- }
-}
-
-func TestHealthCheckHost(t *testing.T) {
- // tests for upstream host on health checks
- tests := []struct {
- config string
- flag bool
- host string
- }{
- // Test #1: without upstream header
- {"proxy / localhost:8080 {\n health_check / \n}", false, "example.com"},
-
- // Test #2: without upstream header, missing host
- {"proxy / localhost:8080 {\n health_check / \n}", true, ""},
-
- // Test #3: with upstream header (via transparent preset)
- {"proxy / localhost:8080 {\n health_check / \n transparent \n}", true, "foo.example.com"},
-
- // Test #4: with upstream header (explicit header)
- {"proxy / localhost:8080 {\n health_check / \n header_upstream Host {host} \n}", true, "example.com"},
-
- // Test #5: with upstream header, missing host
- {"proxy / localhost:8080 {\n health_check / \n transparent \n}", true, ""},
- }
-
- for i, test := range tests {
- upstreams, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), test.host)
- if err != nil {
- t.Errorf("Expected no error. Got: %s", err.Error())
- }
- for _, upstream := range upstreams {
- staticUpstream, ok := upstream.(*staticUpstream)
- if !ok {
- t.Errorf("Type mismatch: %#v", upstream)
- continue
- }
- if test.flag != (staticUpstream.HealthCheck.Host == test.host) {
- t.Errorf("Test %d: expected staticUpstream.HealthCheck.Host=%v, got %v", i, test.host, staticUpstream.HealthCheck.Host)
- }
- }
- }
-}
-
-func TestHealthCheckPort(t *testing.T) {
- var counter int64
-
- healthCounter := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- r.Body.Close()
- atomic.AddInt64(&counter, 1)
- }))
-
- _, healthPort, err := net.SplitHostPort(healthCounter.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
-
- defer healthCounter.Close()
-
- tests := []struct {
- config string
- }{
- // Test #1: upstream with port
- {"proxy / localhost:8080 {\n health_check / health_check_port " + healthPort + "\n}"},
-
- // Test #2: upstream without port (default to 80)
- {"proxy / localhost {\n health_check / health_check_port " + healthPort + "\n}"},
- }
-
- for i, test := range tests {
- counterValueAtStart := atomic.LoadInt64(&counter)
- upstreams, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), "")
- if err != nil {
- t.Error("Expected no error. Got:", err.Error())
- }
-
- // Give some time for healthchecks to hit the server.
- time.Sleep(500 * time.Millisecond)
-
- for _, upstream := range upstreams {
- if err := upstream.Stop(); err != nil {
- t.Errorf("Test %d: Expected no error stopping upstream. Got: %v", i, err.Error())
- }
- }
-
- counterValueAfterShutdown := atomic.LoadInt64(&counter)
-
- if counterValueAfterShutdown == counterValueAtStart {
- t.Errorf("Test %d: Expected healthchecks to hit test server. Got no healthchecks.", i)
- }
- }
-
- t.Run("valid_port", func(t *testing.T) {
- tests := []struct {
- config string
- }{
- // Test #1: invalid port (nil)
- {"proxy / localhost {\n health_check / health_check_port\n}"},
-
- // Test #2: invalid port (string)
- {"proxy / localhost {\n health_check / health_check_port abc\n}"},
-
- // Test #3: invalid port (negative)
- {"proxy / localhost {\n health_check / health_check_port -1\n}"},
- }
-
- for i, test := range tests {
- _, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), "")
- if err == nil {
- t.Errorf("Test %d accepted invalid config", i)
- }
- }
- })
-
-}
-
-func TestHealthCheckContentString(t *testing.T) {
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "blablabla good blablabla")
- r.Body.Close()
- }))
- _, port, err := net.SplitHostPort(server.Listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- tests := []struct {
- config string
- shouldContain bool
- }{
- {"proxy / localhost:" + port +
- " { health_check /testhealth " +
- " health_check_contains good\n}",
- true,
- },
- {"proxy / localhost:" + port + " {\n health_check /testhealth health_check_port " + port +
- " \n health_check_contains bad\n}",
- false,
- },
- }
- for i, test := range tests {
- u, err := NewStaticUpstreams(caddyfile.NewDispenser("Testfile", strings.NewReader(test.config)), "")
- if err != nil {
- t.Errorf("Expected no error. Test %d Got: %s", i, err.Error())
- }
- for _, upstream := range u {
- staticUpstream, ok := upstream.(*staticUpstream)
- if !ok {
- t.Errorf("Type mismatch: %#v", upstream)
- continue
- }
- staticUpstream.healthCheck()
- for _, host := range staticUpstream.Hosts {
- if test.shouldContain && atomic.LoadInt32(&host.Unhealthy) == 0 {
- // healthcheck url was hit and the required test string was found
- continue
- }
- if !test.shouldContain && atomic.LoadInt32(&host.Unhealthy) != 0 {
- // healthcheck url was hit and the required string was not found
- continue
- }
- t.Errorf("Health check bad response")
- }
- upstream.Stop()
- }
- }
-}
diff --git a/caddyhttp/push/handler.go b/caddyhttp/push/handler.go
deleted file mode 100644
index fcc6ab0b08e..00000000000
--- a/caddyhttp/push/handler.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package push
-
-import (
- "net/http"
- "strings"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
- "github.com/mholt/caddy/caddyhttp/staticfiles"
-)
-
-func (h Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- pusher, hasPusher := w.(http.Pusher)
-
- // no push possible, carry on
- if !hasPusher {
- return h.Next.ServeHTTP(w, r)
- }
-
- // check if this is a request for the pushed resource (avoid recursion)
- if _, exists := r.Header[pushHeader]; exists {
- return h.Next.ServeHTTP(w, r)
- }
-
- headers := h.filterProxiedHeaders(r.Header)
-
- // push first
-outer:
- for _, rule := range h.Rules {
- urlPath := r.URL.Path
- matches := httpserver.Path(urlPath).Matches(rule.Path)
- // Also check IndexPages when requesting a directory
- if !matches {
- _, matches = httpserver.IndexFile(h.Root, urlPath, staticfiles.IndexPages)
- }
- if matches {
- for _, resource := range rule.Resources {
- pushErr := pusher.Push(resource.Path, &http.PushOptions{
- Method: resource.Method,
- Header: h.mergeHeaders(headers, resource.Header),
- })
- if pushErr != nil {
- // if we cannot push (either not supported or concurrent streams are full - break)
- break outer
- }
- }
- }
- }
-
- // serve later
- code, err := h.Next.ServeHTTP(w, r)
-
- // push resources returned in Link headers from upstream middlewares or proxied apps
- if links, exists := w.Header()["Link"]; exists {
- h.servePreloadLinks(pusher, headers, links)
- }
-
- return code, err
-}
-
-func (h Middleware) servePreloadLinks(pusher http.Pusher, headers http.Header, links []string) {
- for _, link := range links {
- parts := strings.Split(link, ";")
-
- if link == "" || strings.HasSuffix(link, "nopush") {
- continue
- }
-
- target := strings.TrimSuffix(strings.TrimPrefix(parts[0], "<"), ">")
-
- err := pusher.Push(target, &http.PushOptions{
- Method: http.MethodGet,
- Header: headers,
- })
-
- if err != nil {
- break
- }
- }
-}
-
-func (h Middleware) mergeHeaders(l, r http.Header) http.Header {
- out := http.Header{}
-
- for k, v := range l {
- out[k] = v
- }
-
- for k, vv := range r {
- for _, v := range vv {
- out.Add(k, v)
- }
- }
-
- return out
-}
-
-func (h Middleware) filterProxiedHeaders(headers http.Header) http.Header {
- filter := http.Header{}
-
- for _, header := range proxiedHeaders {
- if val, ok := headers[header]; ok {
- filter[header] = val
- }
- }
-
- return filter
-}
-
-var proxiedHeaders = []string{
- "Accept-Encoding",
- "Accept-Language",
- "Cache-Control",
- "Host",
- "User-Agent",
-}
diff --git a/caddyhttp/push/handler_test.go b/caddyhttp/push/handler_test.go
deleted file mode 100644
index 40903b6fa8b..00000000000
--- a/caddyhttp/push/handler_test.go
+++ /dev/null
@@ -1,389 +0,0 @@
-package push
-
-import (
- "errors"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "path/filepath"
- "reflect"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-type MockedPusher struct {
- http.ResponseWriter
- pushed map[string]*http.PushOptions
- returnedError error
-}
-
-func (w *MockedPusher) Push(target string, options *http.PushOptions) error {
- if w.pushed == nil {
- w.pushed = make(map[string]*http.PushOptions)
- }
-
- w.pushed[target] = options
- return w.returnedError
-}
-
-func TestMiddlewareWillPushResources(t *testing.T) {
-
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/index.html", Resources: []Resource{
- {Path: "/index.css", Method: http.MethodHead, Header: http.Header{"Test": []string{"Value"}}},
- {Path: "/index2.css", Method: http.MethodGet},
- }},
- },
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer}
-
- // when
- middleware.ServeHTTP(pushingWriter, request)
-
- // then
- expectedPushedResources := map[string]*http.PushOptions{
- "/index.css": {
- Method: http.MethodHead,
- Header: http.Header{"Test": []string{"Value"}},
- },
-
- "/index2.css": {
- Method: http.MethodGet,
- Header: http.Header{},
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-}
-
-func TestMiddlewareWillPushResourcesWithMergedHeaders(t *testing.T) {
-
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
- request.Header = http.Header{"Accept-Encoding": []string{"br"}, "Invalid-Header": []string{"Should be filter out"}}
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/index.html", Resources: []Resource{
- {Path: "/index.css", Method: http.MethodHead, Header: http.Header{"Test": []string{"Value"}}},
- {Path: "/index2.css", Method: http.MethodGet},
- }},
- },
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer}
-
- // when
- middleware.ServeHTTP(pushingWriter, request)
-
- // then
- expectedPushedResources := map[string]*http.PushOptions{
- "/index.css": {
- Method: http.MethodHead,
- Header: http.Header{"Test": []string{"Value"}, "Accept-Encoding": []string{"br"}},
- },
-
- "/index2.css": {
- Method: http.MethodGet,
- Header: http.Header{"Accept-Encoding": []string{"br"}},
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-}
-
-func TestMiddlewareShouldntDoRecursivePush(t *testing.T) {
-
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.css", nil)
- request.Header.Add(pushHeader, "")
-
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/", Resources: []Resource{
- {Path: "/index.css", Method: http.MethodHead, Header: http.Header{"Test": []string{"Value"}}},
- {Path: "/index2.css", Method: http.MethodGet},
- }},
- },
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer}
-
- // when
- middleware.ServeHTTP(pushingWriter, request)
-
- // then
- if len(pushingWriter.pushed) > 0 {
- t.Errorf("Expected 0 pushed resources, actual %d", len(pushingWriter.pushed))
- }
-}
-
-func TestMiddlewareShouldStopPushingOnError(t *testing.T) {
-
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/index.html", Resources: []Resource{
- {Path: "/only.css", Method: http.MethodHead, Header: http.Header{"Test": []string{"Value"}}},
- {Path: "/index2.css", Method: http.MethodGet},
- {Path: "/index3.css", Method: http.MethodGet},
- }},
- },
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer, returnedError: errors.New("Cannot push right now")}
-
- // when
- middleware.ServeHTTP(pushingWriter, request)
-
- // then
- expectedPushedResources := map[string]*http.PushOptions{
- "/only.css": {
- Method: http.MethodHead,
- Header: http.Header{"Test": []string{"Value"}},
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-}
-
-func TestMiddlewareWillNotPushResources(t *testing.T) {
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: "/index.html", Resources: []Resource{
- {Path: "/index.css", Method: http.MethodHead, Header: http.Header{"Test": []string{"Value"}}},
- {Path: "/index2.css", Method: http.MethodGet},
- }},
- },
- }
-
- writer := httptest.NewRecorder()
-
- // when
- _, err2 := middleware.ServeHTTP(writer, request)
-
- // then
- if err2 != nil {
- t.Error("Should not return error")
- }
-}
-
-func TestMiddlewareShouldInterceptLinkHeader(t *testing.T) {
- // given
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- w.Header().Add("Link", "; rel=preload; as=stylesheet;")
- w.Header().Add("Link", "; rel=preload; as=stylesheet;")
- w.Header().Add("Link", "")
- w.Header().Add("Link", "")
- w.Header().Add("Link", "; rel=preload; nopush")
- return 0, nil
- }),
- Rules: []Rule{},
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer}
-
- // when
- _, err2 := middleware.ServeHTTP(pushingWriter, request)
-
- // then
- if err2 != nil {
- t.Error("Should not return error")
- }
-
- expectedPushedResources := map[string]*http.PushOptions{
- "/index.css": {
- Method: http.MethodGet,
- Header: http.Header{},
- },
- "/index2.css": {
- Method: http.MethodGet,
- Header: http.Header{},
- },
- "/index3.css": {
- Method: http.MethodGet,
- Header: http.Header{},
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-}
-
-func TestMiddlewareShouldInterceptLinkHeaderPusherError(t *testing.T) {
- // given
- expectedHeaders := http.Header{"Accept-Encoding": []string{"br"}}
- request, err := http.NewRequest(http.MethodGet, "/index.html", nil)
- request.Header = http.Header{"Accept-Encoding": []string{"br"}, "Invalid-Header": []string{"Should be filter out"}}
-
- writer := httptest.NewRecorder()
-
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- w.Header().Add("Link", "; rel=preload; as=stylesheet;")
- w.Header().Add("Link", "; rel=preload; as=stylesheet;")
- return 0, nil
- }),
- Rules: []Rule{},
- }
-
- pushingWriter := &MockedPusher{ResponseWriter: writer, returnedError: errors.New("Cannot push right now")}
-
- // when
- _, err2 := middleware.ServeHTTP(pushingWriter, request)
-
- // then
- if err2 != nil {
- t.Error("Should not return error")
- }
-
- expectedPushedResources := map[string]*http.PushOptions{
- "/index.css": {
- Method: http.MethodGet,
- Header: expectedHeaders,
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-}
-
-func TestMiddlewareShouldPushIndexFile(t *testing.T) {
- // given
- indexFile := "/index.html"
- request, err := http.NewRequest(http.MethodGet, "/", nil) // Request root directory, not indexfile itself
- if err != nil {
- t.Fatalf("Could not create HTTP request: %v", err)
- }
-
- root, err := ioutil.TempDir("", "caddy")
- if err != nil {
- t.Fatalf("Could not create temporary directory: %v", err)
- }
- defer os.Remove(root)
-
- middleware := Middleware{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- return 0, nil
- }),
- Rules: []Rule{
- {Path: indexFile, Resources: []Resource{
- {Path: "/index.css", Method: http.MethodGet},
- }},
- },
- Root: http.Dir(root),
- }
-
- indexFilePath := filepath.Join(root, indexFile)
- _, err = os.Create(indexFilePath)
- if err != nil {
- t.Fatalf("Could not create index file: %s: %v", indexFile, err)
- }
- defer os.Remove(indexFilePath)
-
- pushingWriter := &MockedPusher{
- ResponseWriter: httptest.NewRecorder(),
- returnedError: errors.New("Cannot push right now"),
- }
-
- // when
- _, err2 := middleware.ServeHTTP(pushingWriter, request)
-
- // then
- if err2 != nil {
- t.Error("Should not return error")
- }
-
- expectedPushedResources := map[string]*http.PushOptions{
- "/index.css": {
- Method: http.MethodGet,
- Header: http.Header{},
- },
- }
-
- comparePushedResources(t, expectedPushedResources, pushingWriter.pushed)
-
-}
-
-func comparePushedResources(t *testing.T, expected, actual map[string]*http.PushOptions) {
- if len(expected) != len(actual) {
- t.Errorf("Expected %d pushed resources, actual: %d", len(expected), len(actual))
- }
-
- for target, expectedTarget := range expected {
- if actualTarget, exists := actual[target]; exists {
-
- if expectedTarget.Method != actualTarget.Method {
- t.Errorf("Expected %s resource method to be %s, actual: %s", target, expectedTarget.Method, actualTarget.Method)
- }
-
- if !reflect.DeepEqual(expectedTarget.Header, actualTarget.Header) {
- t.Errorf("Expected %s resource push headers to be %+v, actual: %+v", target, expectedTarget.Header, actualTarget.Header)
- }
- } else {
- t.Errorf("Expected %s to be pushed", target)
- }
- }
-}
diff --git a/caddyhttp/push/push.go b/caddyhttp/push/push.go
deleted file mode 100644
index 2c5821a5b95..00000000000
--- a/caddyhttp/push/push.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package push
-
-import (
- "net/http"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-type (
- // Rule describes conditions on which resources will be pushed
- Rule struct {
- Path string
- Resources []Resource
- }
-
- // Resource describes resource to be pushed
- Resource struct {
- Path string
- Method string
- Header http.Header
- }
-
- // Middleware supports pushing resources to clients
- Middleware struct {
- Next httpserver.Handler
- Rules []Rule
- Root http.FileSystem
- }
-
- ruleOp func([]Resource)
-)
diff --git a/caddyhttp/push/setup.go b/caddyhttp/push/setup.go
deleted file mode 100644
index 28e8dbc532e..00000000000
--- a/caddyhttp/push/setup.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package push
-
-import (
- "errors"
- "fmt"
- "net/http"
- "strings"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("push", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-var errInvalidHeader = errors.New("header directive requires [name] [value]")
-
-var errHeaderStartsWithColon = errors.New("header cannot start with colon")
-var errMethodNotSupported = errors.New("push supports only GET and HEAD methods")
-
-const pushHeader = "X-Push"
-
-var emptyRules = []Rule{}
-
-// setup configures a new Push middleware
-func setup(c *caddy.Controller) error {
- rules, err := parsePushRules(c)
-
- if err != nil {
- return err
- }
-
- cfg := httpserver.GetConfig(c)
- cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Middleware{Next: next, Rules: rules, Root: http.Dir(cfg.Root)}
- })
-
- return nil
-}
-
-func parsePushRules(c *caddy.Controller) ([]Rule, error) {
- var rules = make(map[string]*Rule)
-
- for c.NextLine() {
- var rule *Rule
- var resources []Resource
- var ops []ruleOp
-
- parseBlock := func() error {
- for c.NextBlock() {
- val := c.Val()
-
- switch val {
- case "method":
- if !c.NextArg() {
- return c.ArgErr()
- }
-
- method := c.Val()
-
- if err := validateMethod(method); err != nil {
- return errMethodNotSupported
- }
-
- ops = append(ops, setMethodOp(method))
-
- case "header":
- args := c.RemainingArgs()
-
- if len(args) != 2 {
- return errInvalidHeader
- }
-
- if err := validateHeader(args[0]); err != nil {
- return err
- }
-
- ops = append(ops, setHeaderOp(args[0], args[1]))
- default:
- resources = append(resources, Resource{
- Path: val,
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- })
- }
- }
- return nil
- }
-
- args := c.RemainingArgs()
-
- if len(args) == 0 {
- rule = new(Rule)
- rule.Path = "/"
- rules["/"] = rule
- err := parseBlock()
- if err != nil {
- return emptyRules, err
- }
- } else {
- path := args[0]
-
- if existingRule, ok := rules[path]; ok {
- rule = existingRule
- } else {
- rule = new(Rule)
- rule.Path = path
- rules[rule.Path] = rule
- }
-
- for i := 1; i < len(args); i++ {
- resources = append(resources, Resource{
- Path: args[i],
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- })
- }
-
- err := parseBlock()
- if err != nil {
- return emptyRules, err
- }
- }
-
- for _, op := range ops {
- op(resources)
- }
- rule.Resources = append(rule.Resources, resources...)
- }
-
- var returnRules []Rule
- for _, rule := range rules {
- returnRules = append(returnRules, *rule)
- }
-
- return returnRules, nil
-}
-
-func setHeaderOp(key, value string) func(resources []Resource) {
- return func(resources []Resource) {
- for index := range resources {
- resources[index].Header.Set(key, value)
- }
- }
-}
-
-func setMethodOp(method string) func(resources []Resource) {
- return func(resources []Resource) {
- for index := range resources {
- resources[index].Method = method
- }
- }
-}
-
-func validateHeader(header string) error {
- if strings.HasPrefix(header, ":") {
- return errHeaderStartsWithColon
- }
-
- switch strings.ToLower(header) {
- case "content-length", "content-encoding", "trailer", "te", "expect", "host":
- return fmt.Errorf("push headers cannot include %s", header)
- }
-
- return nil
-}
-
-// rules based on https://go-review.googlesource.com/#/c/29439/4/http2/go18.go#94
-func validateMethod(method string) error {
- if method != http.MethodGet && method != http.MethodHead {
- return errMethodNotSupported
- }
-
- return nil
-}
diff --git a/caddyhttp/push/setup_test.go b/caddyhttp/push/setup_test.go
deleted file mode 100644
index e1d21ff17a1..00000000000
--- a/caddyhttp/push/setup_test.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package push
-
-import (
- "net/http"
- "reflect"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestPushAvailable(t *testing.T) {
- err := setup(caddy.NewTestController("http", "push /index.html /available.css"))
-
- if err != nil {
- t.Fatalf("Error %s occurred, expected none", err)
- }
-}
-
-func TestConfigParse(t *testing.T) {
- tests := []struct {
- name string
- input string
- shouldErr bool
- expected []Rule
- }{
- {
- "ParseInvalidEmptyConfig", `push`, false, []Rule{{Path: "/"}},
- },
- {
- "ParseInvalidConfig", `push /index.html`, false, []Rule{{Path: "/index.html"}},
- },
- {
- "ParseInvalidConfigBlock", `push /index.html /index.css {
- method
- }`, true, []Rule{},
- },
- {
- "ParseInvalidHeaderFormat", `push /index.html /index.css {
- header :invalid value
- }`, true, []Rule{},
- },
- {
- "ParseForbiddenHeader", `push /index.html /index.css {
- header Content-Length 1000
- }`, true, []Rule{},
- },
- {
- "ParseInvalidMethod", `push /index.html /index.css {
- method POST
- }`, true, []Rule{},
- },
- {
- "ParseInvalidHeaderBlock", `push /index.html /index.css {
- header
- }`, true, []Rule{},
- },
- {
- "ParseInvalidHeaderBlock2", `push /index.html /index.css {
- header name
- }`, true, []Rule{},
- },
- {
- "ParseProperConfig", `push /index.html /style.css /style2.css`, false, []Rule{
- {
- Path: "/index.html",
- Resources: []Resource{
- {
- Path: "/style.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- },
- {
- Path: "/style2.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- },
- },
- },
- },
- },
- {
- "ParseSimpleInlinePush", `push /index.html {
- /style.css
- /style2.css
- }`, false, []Rule{
- {
- Path: "/index.html",
- Resources: []Resource{
- {
- Path: "/style.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- },
- {
- Path: "/style2.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}},
- },
- },
- },
- },
- },
- {
- "ParseSimpleInlinePushWithOps", `push /index.html {
- /style.css
- /style2.css
- header Test Value
- }`, false, []Rule{
- {
- Path: "/index.html",
- Resources: []Resource{
- {
- Path: "/style.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}, "Test": []string{"Value"}},
- },
- {
- Path: "/style2.css",
- Method: http.MethodGet,
- Header: http.Header{pushHeader: []string{}, "Test": []string{"Value"}},
- },
- },
- },
- },
- },
- {
- "ParseProperConfigWithBlock", `push /index.html /style.css /style2.css {
- method HEAD
- header Own-Header Value
- header Own-Header2 Value2
- }`, false, []Rule{
- {
- Path: "/index.html",
- Resources: []Resource{
- {
- Path: "/style.css",
- Method: http.MethodHead,
- Header: http.Header{
- "Own-Header": []string{"Value"},
- "Own-Header2": []string{"Value2"},
- "X-Push": []string{},
- },
- },
- {
- Path: "/style2.css",
- Method: http.MethodHead,
- Header: http.Header{
- "Own-Header": []string{"Value"},
- "Own-Header2": []string{"Value2"},
- "X-Push": []string{},
- },
- },
- },
- },
- },
- },
- {
- "ParseMergesRules", `push /index.html /index.css {
- header name value
- }
-
- push /index.html /index2.css {
- header name2 value2
- method HEAD
- }
- `, false, []Rule{
- {
- Path: "/index.html",
- Resources: []Resource{
- {
- Path: "/index.css",
- Method: http.MethodGet,
- Header: http.Header{
- "Name": []string{"value"},
- "X-Push": []string{},
- },
- },
- {
- Path: "/index2.css",
- Method: http.MethodHead,
- Header: http.Header{
- "Name2": []string{"value2"},
- "X-Push": []string{},
- },
- },
- },
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t2 *testing.T) {
- actual, err := parsePushRules(caddy.NewTestController("http", test.input))
-
- if err == nil && test.shouldErr {
- t2.Errorf("Test %s didn't error, but it should have", test.name)
- } else if err != nil && !test.shouldErr {
- t2.Errorf("Test %s errored, but it shouldn't have; got '%v'", test.name, err)
- }
-
- if len(actual) != len(test.expected) {
- t2.Fatalf("Test %s expected %d rules, but got %d",
- test.name, len(test.expected), len(actual))
- }
-
- for j, expectedRule := range test.expected {
- actualRule := actual[j]
-
- if actualRule.Path != expectedRule.Path {
- t.Errorf("Test %s, rule %d: Expected path %s, but got %s",
- test.name, j, expectedRule.Path, actualRule.Path)
- }
-
- if !reflect.DeepEqual(actualRule.Resources, expectedRule.Resources) {
- t.Errorf("Test %s, rule %d: Expected resources %v, but got %v",
- test.name, j, expectedRule.Resources, actualRule.Resources)
- }
- }
- })
- }
-}
-
-func TestSetupInstalledMiddleware(t *testing.T) {
-
- // given
- c := caddy.NewTestController("http", `push /index.html /test.js`)
-
- // when
- err := setup(c)
-
- // then
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
-
- middlewares := httpserver.GetConfig(c).Middleware()
-
- if len(middlewares) != 1 {
- t.Fatalf("Expected 1 middleware, had %d instead", len(middlewares))
- }
-
- handler := middlewares[0](httpserver.EmptyNext)
- pushHandler, ok := handler.(Middleware)
-
- if !ok {
- t.Fatalf("Expected handler to be type Middleware, got: %#v", handler)
- }
-
- if !httpserver.SameNext(pushHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler Middleware was not set properly")
- }
-}
-
-func TestSetupWithError(t *testing.T) {
- // given
- c := caddy.NewTestController("http", "push {\nmethod\n}")
-
- // when
- err := setup(c)
-
- // then
- if err == nil {
- t.Error("Expected error but none occurred")
- }
-}
diff --git a/caddyhttp/redirect/redirect.go b/caddyhttp/redirect/redirect.go
deleted file mode 100644
index 711313a8cad..00000000000
--- a/caddyhttp/redirect/redirect.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package redirect is middleware for redirecting certain requests
-// to other locations.
-package redirect
-
-import (
- "fmt"
- "html"
- "net/http"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Redirect is middleware to respond with HTTP redirects
-type Redirect struct {
- Next httpserver.Handler
- Rules []Rule
-}
-
-// ServeHTTP implements the httpserver.Handler interface.
-func (rd Redirect) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- for _, rule := range rd.Rules {
- if (rule.FromPath == "/" || r.URL.Path == rule.FromPath) && schemeMatches(rule, r) && rule.Match(r) {
- to := httpserver.NewReplacer(r, nil, "").Replace(rule.To)
- if rule.Meta {
- safeTo := html.EscapeString(to)
- fmt.Fprintf(w, metaRedir, safeTo, safeTo)
- } else {
- http.Redirect(w, r, to, rule.Code)
- }
- return 0, nil
- }
- }
- return rd.Next.ServeHTTP(w, r)
-}
-
-func schemeMatches(rule Rule, req *http.Request) bool {
- return (rule.FromScheme() == "https" && req.TLS != nil) ||
- (rule.FromScheme() != "https" && req.TLS == nil)
-}
-
-// Rule describes an HTTP redirect rule.
-type Rule struct {
- FromScheme func() string
- FromPath, To string
- Code int
- Meta bool
- httpserver.RequestMatcher
-}
-
-// Script tag comes first since that will better imitate a redirect in the browser's
-// history, but the meta tag is a fallback for most non-JS clients.
-const metaRedir = `
-
-
-
-
-
- Redirecting...
-
-`
diff --git a/caddyhttp/redirect/redirect_test.go b/caddyhttp/redirect/redirect_test.go
deleted file mode 100644
index faf3846ee6a..00000000000
--- a/caddyhttp/redirect/redirect_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package redirect
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestRedirect(t *testing.T) {
- for i, test := range []struct {
- from string
- expectedLocation string
- expectedCode int
- }{
- {"http://localhost/from", "/to", http.StatusMovedPermanently},
- {"http://localhost/a", "/b", http.StatusTemporaryRedirect},
- {"http://localhost/aa", "", http.StatusOK},
- {"http://localhost/", "", http.StatusOK},
- {"http://localhost/a?foo=bar", "/b", http.StatusTemporaryRedirect},
- {"http://localhost/asdf?foo=bar", "", http.StatusOK},
- {"http://localhost/foo#bar", "", http.StatusOK},
- {"http://localhost/a#foo", "/b", http.StatusTemporaryRedirect},
-
- // The scheme checks that were added to this package don't actually
- // help with redirects because of Caddy's design: a redirect middleware
- // for http will always be different than the redirect middleware for
- // https because they have to be on different listeners. These tests
- // just go to show extra bulletproofing, I guess.
- {"http://localhost/scheme", "https://localhost/scheme", http.StatusMovedPermanently},
- {"https://localhost/scheme", "", http.StatusOK},
- {"https://localhost/scheme2", "http://localhost/scheme2", http.StatusMovedPermanently},
- {"http://localhost/scheme2", "", http.StatusOK},
- {"http://localhost/scheme3", "https://localhost/scheme3", http.StatusMovedPermanently},
- {"https://localhost/scheme3", "", http.StatusOK},
- } {
- var nextCalled bool
-
- re := Redirect{
- Next: httpserver.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
- nextCalled = true
- return 0, nil
- }),
- Rules: []Rule{
- {FromScheme: func() string { return "http" }, FromPath: "/from", To: "/to", Code: http.StatusMovedPermanently, RequestMatcher: httpserver.IfMatcher{}},
- {FromScheme: func() string { return "http" }, FromPath: "/a", To: "/b", Code: http.StatusTemporaryRedirect, RequestMatcher: httpserver.IfMatcher{}},
-
- // These http and https schemes would never actually be mixed in the same
- // redirect rule with Caddy because http and https schemes have different listeners,
- // so they don't share a redirect rule. So although these tests prove something
- // impossible with Caddy, it's extra bulletproofing at very little cost.
- {FromScheme: func() string { return "http" }, FromPath: "/scheme", To: "https://localhost/scheme", Code: http.StatusMovedPermanently, RequestMatcher: httpserver.IfMatcher{}},
- {FromScheme: func() string { return "https" }, FromPath: "/scheme2", To: "http://localhost/scheme2", Code: http.StatusMovedPermanently, RequestMatcher: httpserver.IfMatcher{}},
- {FromScheme: func() string { return "" }, FromPath: "/scheme3", To: "https://localhost/scheme3", Code: http.StatusMovedPermanently, RequestMatcher: httpserver.IfMatcher{}},
- },
- }
-
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request: %v", i, err)
- }
- if strings.HasPrefix(test.from, "https://") {
- req.TLS = new(tls.ConnectionState) // faux HTTPS
- }
-
- rec := httptest.NewRecorder()
- re.ServeHTTP(rec, req)
-
- if rec.Header().Get("Location") != test.expectedLocation {
- t.Errorf("Test %d: Expected Location header to be %q but was %q",
- i, test.expectedLocation, rec.Header().Get("Location"))
- }
-
- if rec.Code != test.expectedCode {
- t.Errorf("Test %d: Expected status code to be %d but was %d",
- i, test.expectedCode, rec.Code)
- }
-
- if nextCalled && test.expectedLocation != "" {
- t.Errorf("Test %d: Next handler was unexpectedly called", i)
- }
- }
-}
-
-func TestParametersRedirect(t *testing.T) {
- re := Redirect{
- Rules: []Rule{
- {FromScheme: func() string { return "http" }, FromPath: "/", Meta: false, To: "http://example.com{uri}", RequestMatcher: httpserver.IfMatcher{}},
- },
- }
-
- req, err := http.NewRequest("GET", "/a?b=c", nil)
- if err != nil {
- t.Fatalf("Test 1: Could not create HTTP request: %v", err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- rec := httptest.NewRecorder()
- re.ServeHTTP(rec, req)
-
- if got, want := rec.Header().Get("Location"), "http://example.com/a?b=c"; got != want {
- t.Fatalf("Test 1: expected location header %s but was %s", want, got)
- }
-
- re = Redirect{
- Rules: []Rule{
- {FromScheme: func() string { return "http" }, FromPath: "/", Meta: false, To: "http://example.com/a{path}?b=c&{query}", RequestMatcher: httpserver.IfMatcher{}},
- },
- }
-
- req, err = http.NewRequest("GET", "/d?e=f", nil)
- if err != nil {
- t.Fatalf("Test 2: Could not create HTTP request: %v", err)
- }
- ctx = context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- re.ServeHTTP(rec, req)
-
- if got, want := rec.Header().Get("Location"), "http://example.com/a/d?b=c&e=f"; got != want {
- t.Fatalf("Test 2: expected location header %s but was %s", want, got)
- }
-}
-
-func TestMetaRedirect(t *testing.T) {
- re := Redirect{
- Rules: []Rule{
- {FromScheme: func() string { return "http" }, FromPath: "/whatever", Meta: true, To: "/something", RequestMatcher: httpserver.IfMatcher{}},
- {FromScheme: func() string { return "http" }, FromPath: "/", Meta: true, To: "https://example.com/", RequestMatcher: httpserver.IfMatcher{}},
- },
- }
-
- for i, test := range re.Rules {
- req, err := http.NewRequest("GET", test.FromPath, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request: %v", i, err)
- }
-
- rec := httptest.NewRecorder()
- re.ServeHTTP(rec, req)
-
- body, err := ioutil.ReadAll(rec.Body)
- if err != nil {
- t.Fatalf("Test %d: Could not read HTTP response body: %v", i, err)
- }
- expectedSnippet := ``
- if !bytes.Contains(body, []byte(expectedSnippet)) {
- t.Errorf("Test %d: Expected Response Body to contain %q but was %q",
- i, expectedSnippet, body)
- }
- }
-}
diff --git a/caddyhttp/redirect/setup.go b/caddyhttp/redirect/setup.go
deleted file mode 100644
index 2e238dd1363..00000000000
--- a/caddyhttp/redirect/setup.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package redirect
-
-import (
- "net/http"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("redir", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Redirect middleware instance.
-func setup(c *caddy.Controller) error {
- rules, err := redirParse(c)
- if err != nil {
- return err
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Redirect{Next: next, Rules: rules}
- })
-
- return nil
-}
-
-func redirParse(c *caddy.Controller) ([]Rule, error) {
- var redirects []Rule
-
- cfg := httpserver.GetConfig(c)
-
- initRule := func(rule *Rule, defaultCode string, args []string) error {
- rule.FromScheme = func() string {
- if cfg.TLS.Enabled {
- return "https"
- }
- return "http"
- }
-
- var (
- from = "/"
- to string
- code = defaultCode
- )
- switch len(args) {
- case 1:
- // To specified (catch-all redirect)
- // Not sure why user is doing this in a table, as it causes all other redirects to be ignored.
- // As such, this feature remains undocumented.
- to = args[0]
- case 2:
- // From and To specified
- from = args[0]
- to = args[1]
- case 3:
- // From, To, and Code specified
- from = args[0]
- to = args[1]
- code = args[2]
- default:
- return c.ArgErr()
- }
-
- rule.FromPath = from
- rule.To = to
- if code == "meta" {
- rule.Meta = true
- code = defaultCode
- }
- if codeNumber, ok := httpRedirs[code]; ok {
- rule.Code = codeNumber
- } else {
- return c.Errf("Invalid redirect code '%v'", code)
- }
-
- return nil
- }
-
- // checkAndSaveRule checks the rule for validity (except the redir code)
- // and saves it if it's valid, or returns an error.
- checkAndSaveRule := func(rule Rule) error {
- if rule.FromPath == rule.To {
- return c.Err("'from' and 'to' values of redirect rule cannot be the same")
- }
-
- for _, otherRule := range redirects {
- if otherRule.FromPath == rule.FromPath {
- return c.Errf("rule with duplicate 'from' value: %s -> %s", otherRule.FromPath, otherRule.To)
- }
- }
-
- redirects = append(redirects, rule)
- return nil
- }
-
- const initDefaultCode = "301"
-
- for c.Next() {
- args := c.RemainingArgs()
- matcher, err := httpserver.SetupIfMatcher(c)
- if err != nil {
- return nil, err
- }
-
- var hadOptionalBlock bool
- for c.NextBlock() {
- if httpserver.IfMatcherKeyword(c) {
- continue
- }
-
- hadOptionalBlock = true
-
- rule := Rule{
- RequestMatcher: matcher,
- }
-
- defaultCode := initDefaultCode
- // Set initial redirect code
- if len(args) == 1 {
- defaultCode = args[0]
- }
-
- // RemainingArgs only gets the values after the current token, but in our
- // case we want to include the current token to get an accurate count.
- insideArgs := append([]string{c.Val()}, c.RemainingArgs()...)
- err := initRule(&rule, defaultCode, insideArgs)
- if err != nil {
- return redirects, err
- }
-
- err = checkAndSaveRule(rule)
- if err != nil {
- return redirects, err
- }
- }
-
- if !hadOptionalBlock {
- rule := Rule{
- RequestMatcher: matcher,
- }
- err := initRule(&rule, initDefaultCode, args)
- if err != nil {
- return redirects, err
- }
-
- err = checkAndSaveRule(rule)
- if err != nil {
- return redirects, err
- }
- }
- }
-
- return redirects, nil
-}
-
-// httpRedirs is a list of supported HTTP redirect codes.
-var httpRedirs = map[string]int{
- "300": http.StatusMultipleChoices,
- "301": http.StatusMovedPermanently,
- "302": http.StatusFound, // (NOT CORRECT for "Temporary Redirect", see 307)
- "303": http.StatusSeeOther,
- "304": http.StatusNotModified,
- "305": http.StatusUseProxy,
- "307": http.StatusTemporaryRedirect,
- "308": http.StatusPermanentRedirect, // Permanent Redirect (RFC 7238)
-}
diff --git a/caddyhttp/redirect/setup_test.go b/caddyhttp/redirect/setup_test.go
deleted file mode 100644
index bb082fd84b1..00000000000
--- a/caddyhttp/redirect/setup_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package redirect
-
-import (
- "fmt"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
-
- for j, test := range []struct {
- input string
- shouldErr bool
- expectedRules []Rule
- }{
- // test case #0 tests the recognition of a valid HTTP status code defined outside of block statement
- {"redir 300 {\n/ /foo\n}", false, []Rule{{FromPath: "/", To: "/foo", Code: 300, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #1 tests the recognition of an invalid HTTP status code defined outside of block statement
- {"redir 9000 {\n/ /foo\n}", true, []Rule{{}}},
-
- // test case #2 tests the detection of a valid HTTP status code outside of a block statement being overridden by an invalid HTTP status code inside statement of a block statement
- {"redir 300 {\n/ /foo 9000\n}", true, []Rule{{}}},
-
- // test case #3 tests the detection of an invalid HTTP status code outside of a block statement being overridden by a valid HTTP status code inside statement of a block statement
- {"redir 9000 {\n/ /foo 300\n}", true, []Rule{{}}},
-
- // test case #4 tests the recognition of a TO redirection in a block statement.The HTTP status code is set to the default of 301 - MovedPermanently
- {"redir 302 {\n/foo\n}", false, []Rule{{FromPath: "/", To: "/foo", Code: 302, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #5 tests the recognition of a TO and From redirection in a block statement
- {"redir {\n/bar /foo 303\n}", false, []Rule{{FromPath: "/bar", To: "/foo", Code: 303, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #6 tests the recognition of a TO redirection in a non-block statement. The HTTP status code is set to the default of 301 - MovedPermanently
- {"redir /foo", false, []Rule{{FromPath: "/", To: "/foo", Code: 301, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #7 tests the recognition of a TO and From redirection in a non-block statement
- {"redir /bar /foo 303", false, []Rule{{FromPath: "/bar", To: "/foo", Code: 303, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #8 tests the recognition of multiple redirections
- {"redir {\n / /foo 304 \n} \n redir {\n /bar /foobar 305 \n}", false,
- []Rule{{FromPath: "/", To: "/foo", Code: 304, RequestMatcher: httpserver.IfMatcher{}},
- {FromPath: "/bar", To: "/foobar", Code: 305, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #9 tests the detection of duplicate redirections
- {"redir {\n /bar /foo 304 \n} redir {\n /bar /foo 304 \n}", true, []Rule{{}}},
-
- // test case #10 tests the detection of a valid HTTP status code outside of a block statement being overridden by an valid HTTP status code inside statement of a block statement
- {"redir 300 {\n/ /foo 301\n}", false, []Rule{{FromPath: "/", To: "/foo", Code: 301, RequestMatcher: httpserver.IfMatcher{}}}},
-
- // test case #11 tests the recognition of a matcher
- {"redir {\n if {port} is 80\n/ /foo\n}", false, []Rule{{FromPath: "/", To: "/foo", Code: 301,
- RequestMatcher: func() httpserver.IfMatcher {
- c := caddy.NewTestController("http", "{\n if {port} is 80\n}")
- matcher, _ := httpserver.SetupIfMatcher(c)
- return matcher.(httpserver.IfMatcher)
- }()}}},
-
- // test case #12 tests the detection of a valid HTTP status code outside of a block statement with a matcher
- {"redir 300 {\n if {port} is 80\n/ /foo\n}", false, []Rule{{FromPath: "/", To: "/foo", Code: 300,
- RequestMatcher: func() httpserver.IfMatcher {
- c := caddy.NewTestController("http", "{\n if {port} is 80\n}")
- matcher, _ := httpserver.SetupIfMatcher(c)
- return matcher.(httpserver.IfMatcher)
- }()}}},
- } {
- c := caddy.NewTestController("http", test.input)
- err := setup(c)
- if err != nil && !test.shouldErr {
- t.Errorf("Test case #%d received an error of %v", j, err)
- } else if test.shouldErr {
- continue
- }
- mids := httpserver.GetConfig(c).Middleware()
- receivedRules := mids[len(mids)-1](nil).(Redirect).Rules
-
- for i, receivedRule := range receivedRules {
- if receivedRule.FromPath != test.expectedRules[i].FromPath {
- t.Errorf("Test case #%d.%d expected a from path of %s, but received a from path of %s", j, i, test.expectedRules[i].FromPath, receivedRule.FromPath)
- }
- if receivedRule.To != test.expectedRules[i].To {
- t.Errorf("Test case #%d.%d expected a TO path of %s, but received a TO path of %s", j, i, test.expectedRules[i].To, receivedRule.To)
- }
- if receivedRule.Code != test.expectedRules[i].Code {
- t.Errorf("Test case #%d.%d expected a HTTP status code of %d, but received a code of %d", j, i, test.expectedRules[i].Code, receivedRule.Code)
- }
- if gotMatcher, expectMatcher := fmt.Sprint(receivedRule.RequestMatcher), fmt.Sprint(test.expectedRules[i].RequestMatcher); gotMatcher != expectMatcher {
- t.Errorf("Test case #%d.%d expected a Matcher %s, but received a Matcher %s", j, i, expectMatcher, gotMatcher)
- }
- }
- }
-
-}
diff --git a/caddyhttp/requestid/requestid.go b/caddyhttp/requestid/requestid.go
deleted file mode 100644
index 40b258d4200..00000000000
--- a/caddyhttp/requestid/requestid.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package requestid
-
-import (
- "context"
- "log"
- "net/http"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
- uuid "github.com/nu7hatch/gouuid"
-)
-
-// Handler is a middleware handler
-type Handler struct {
- Next httpserver.Handler
-}
-
-func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- reqid := UUID()
- c := context.WithValue(r.Context(), httpserver.RequestIDCtxKey, reqid)
- r = r.WithContext(c)
-
- return h.Next.ServeHTTP(w, r)
-}
-
-// UUID returns U4 UUID
-func UUID() string {
- u4, err := uuid.NewV4()
- if err != nil {
- log.Printf("[ERROR] generating request ID: %v", err)
- return ""
- }
-
- return u4.String()
-}
diff --git a/caddyhttp/requestid/requestid_test.go b/caddyhttp/requestid/requestid_test.go
deleted file mode 100644
index 77014dfd433..00000000000
--- a/caddyhttp/requestid/requestid_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package requestid
-
-import (
- "context"
- "net/http"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestRequestID(t *testing.T) {
- request, err := http.NewRequest("GET", "http://localhost/", nil)
- if err != nil {
- t.Fatal("Could not create HTTP request:", err)
- }
-
- reqid := UUID()
-
- c := context.WithValue(request.Context(), httpserver.RequestIDCtxKey, reqid)
-
- request = request.WithContext(c)
-
- // See caddyhttp/replacer.go
- value, _ := request.Context().Value(httpserver.RequestIDCtxKey).(string)
-
- if value == "" {
- t.Fatal("Request ID should not be empty")
- }
-
- if value != reqid {
- t.Fatal("Request ID does not match")
- }
-}
diff --git a/caddyhttp/requestid/setup.go b/caddyhttp/requestid/setup.go
deleted file mode 100644
index 8c06af9e938..00000000000
--- a/caddyhttp/requestid/setup.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package requestid
-
-import (
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("request_id", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-func setup(c *caddy.Controller) error {
- for c.Next() {
- if c.NextArg() {
- return c.ArgErr() //no arg expected.
- }
- }
-
- httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Handler{Next: next}
- })
-
- return nil
-}
diff --git a/caddyhttp/requestid/setup_test.go b/caddyhttp/requestid/setup_test.go
deleted file mode 100644
index 17cbceee39f..00000000000
--- a/caddyhttp/requestid/setup_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package requestid
-
-import (
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `requestid`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, got 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Handler)
-
- if !ok {
- t.Fatalf("Expected handler to be type Handler, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-}
-
-func TestSetupWithArg(t *testing.T) {
- c := caddy.NewTestController("http", `requestid abc`)
- err := setup(c)
- if err == nil {
- t.Errorf("Expected an error, got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) != 0 {
- t.Fatal("Expected no middleware")
- }
-}
diff --git a/caddyhttp/rewrite/rewrite.go b/caddyhttp/rewrite/rewrite.go
deleted file mode 100644
index 14d25ca585e..00000000000
--- a/caddyhttp/rewrite/rewrite.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Package rewrite is middleware for rewriting requests internally to
-// a different path.
-package rewrite
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "path"
- "path/filepath"
- "regexp"
- "strings"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// Result is the result of a rewrite
-type Result int
-
-const (
- // RewriteIgnored is returned when rewrite is not done on request.
- RewriteIgnored Result = iota
- // RewriteDone is returned when rewrite is done on request.
- RewriteDone
-)
-
-// Rewrite is middleware to rewrite request locations internally before being handled.
-type Rewrite struct {
- Next httpserver.Handler
- FileSys http.FileSystem
- Rules []httpserver.HandlerConfig
-}
-
-// ServeHTTP implements the httpserver.Handler interface.
-func (rw Rewrite) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- if rule := httpserver.ConfigSelector(rw.Rules).Select(r); rule != nil {
- rule.(Rule).Rewrite(rw.FileSys, r)
- }
-
- return rw.Next.ServeHTTP(w, r)
-}
-
-// Rule describes an internal location rewrite rule.
-type Rule interface {
- httpserver.HandlerConfig
- // Rewrite rewrites the internal location of the current request.
- Rewrite(http.FileSystem, *http.Request) Result
-}
-
-// SimpleRule is a simple rewrite rule.
-type SimpleRule struct {
- From, To string
-}
-
-// NewSimpleRule creates a new Simple Rule
-func NewSimpleRule(from, to string) SimpleRule {
- return SimpleRule{from, to}
-}
-
-// BasePath satisfies httpserver.Config
-func (s SimpleRule) BasePath() string { return s.From }
-
-// Match satisfies httpserver.Config
-func (s SimpleRule) Match(r *http.Request) bool { return s.From == r.URL.Path }
-
-// Rewrite rewrites the internal location of the current request.
-func (s SimpleRule) Rewrite(fs http.FileSystem, r *http.Request) Result {
-
- // attempt rewrite
- return To(fs, r, s.To, newReplacer(r))
-}
-
-// ComplexRule is a rewrite rule based on a regular expression
-type ComplexRule struct {
- // Path base. Request to this path and subpaths will be rewritten
- Base string
-
- // Path to rewrite to
- To string
-
- // Extensions to filter by
- Exts []string
-
- // Request matcher
- httpserver.RequestMatcher
-
- Regexp *regexp.Regexp
-}
-
-// NewComplexRule creates a new RegexpRule. It returns an error if regexp
-// pattern (pattern) or extensions (ext) are invalid.
-func NewComplexRule(base, pattern, to string, ext []string, matcher httpserver.RequestMatcher) (ComplexRule, error) {
- // validate regexp if present
- var r *regexp.Regexp
- if pattern != "" {
- var err error
- r, err = regexp.Compile(pattern)
- if err != nil {
- return ComplexRule{}, err
- }
- }
-
- // validate extensions if present
- for _, v := range ext {
- if len(v) < 2 || (len(v) < 3 && v[0] == '!') {
- // check if no extension is specified
- if v != "/" && v != "!/" {
- return ComplexRule{}, fmt.Errorf("invalid extension %v", v)
- }
- }
- }
-
- // use both IfMatcher and PathMatcher
- matcher = httpserver.MergeRequestMatchers(
- // If condition matcher
- matcher,
- // Base path matcher
- httpserver.PathMatcher(base),
- )
-
- return ComplexRule{
- Base: base,
- To: to,
- Exts: ext,
- RequestMatcher: matcher,
- Regexp: r,
- }, nil
-}
-
-// BasePath satisfies httpserver.Config
-func (r ComplexRule) BasePath() string { return r.Base }
-
-// Match satisfies httpserver.Config.
-//
-// Though ComplexRule embeds a RequestMatcher, additional
-// checks are needed which requires a custom implementation.
-func (r ComplexRule) Match(req *http.Request) bool {
- // validate RequestMatcher
- // includes if and path
- if !r.RequestMatcher.Match(req) {
- return false
- }
-
- // validate extensions
- if !r.matchExt(req.URL.Path) {
- return false
- }
-
- // if regex is nil, ignore
- if r.Regexp == nil {
- return true
- }
- // otherwise validate regex
- return r.regexpMatches(req.URL.Path) != nil
-}
-
-// Rewrite rewrites the internal location of the current request.
-func (r ComplexRule) Rewrite(fs http.FileSystem, req *http.Request) (re Result) {
- replacer := newReplacer(req)
-
- // validate regexp if present
- if r.Regexp != nil {
- matches := r.regexpMatches(req.URL.Path)
- switch len(matches) {
- case 0:
- // no match
- return
- default:
- // set regexp match variables {1}, {2} ...
-
- // url escaped values of ? and #.
- q, f := url.QueryEscape("?"), url.QueryEscape("#")
-
- for i := 1; i < len(matches); i++ {
- // Special case of unescaped # and ? by stdlib regexp.
- // Reverse the unescape.
- if strings.ContainsAny(matches[i], "?#") {
- matches[i] = strings.NewReplacer("?", q, "#", f).Replace(matches[i])
- }
-
- replacer.Set(fmt.Sprint(i), matches[i])
- }
- }
- }
-
- // attempt rewrite
- return To(fs, req, r.To, replacer)
-}
-
-// matchExt matches rPath against registered file extensions.
-// Returns true if a match is found and false otherwise.
-func (r ComplexRule) matchExt(rPath string) bool {
- f := filepath.Base(rPath)
- ext := path.Ext(f)
- if ext == "" {
- ext = "/"
- }
-
- mustUse := false
- for _, v := range r.Exts {
- use := true
- if v[0] == '!' {
- use = false
- v = v[1:]
- }
-
- if use {
- mustUse = true
- }
-
- if ext == v {
- return use
- }
- }
-
- return !mustUse
-}
-
-func (r ComplexRule) regexpMatches(rPath string) []string {
- if r.Regexp != nil {
- // include trailing slash in regexp if present
- start := len(r.Base)
- if strings.HasSuffix(r.Base, "/") {
- start--
- }
- return r.Regexp.FindStringSubmatch(rPath[start:])
- }
- return nil
-}
-
-func newReplacer(r *http.Request) httpserver.Replacer {
- return httpserver.NewReplacer(r, nil, "")
-}
diff --git a/caddyhttp/rewrite/rewrite_test.go b/caddyhttp/rewrite/rewrite_test.go
deleted file mode 100644
index 6be2f221183..00000000000
--- a/caddyhttp/rewrite/rewrite_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package rewrite
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestRewrite(t *testing.T) {
- rw := Rewrite{
- Next: httpserver.HandlerFunc(urlPrinter),
- Rules: []httpserver.HandlerConfig{
- NewSimpleRule("/from", "/to"),
- NewSimpleRule("/a", "/b"),
- NewSimpleRule("/b", "/b{uri}"),
- },
- FileSys: http.Dir("."),
- }
-
- regexps := [][]string{
- {"/reg/", ".*", "/to", ""},
- {"/r/", "[a-z]+", "/toaz", "!.html|"},
- {"/path/", "[a-z0-9]", "/to/{path}", ""},
- {"/url/", "a([a-z0-9]*)s([A-Z]{2})", "/to/{rewrite_path}", ""},
- {"/ab/", "ab", "/ab?{query}", ".txt|"},
- {"/ab/", "ab", "/ab?type=html&{query}", ".html|"},
- {"/abc/", "ab", "/abc/{file}", ".html|"},
- {"/abcd/", "ab", "/a/{dir}/{file}", ".html|"},
- {"/abcde/", "ab", "/a#{fragment}", ".html|"},
- {"/ab/", `.*\.jpg`, "/ajpg", ""},
- {"/reggrp", `/ad/([0-9]+)([a-z]*)`, "/a{1}/{2}", ""},
- {"/reg2grp", `(.*)`, "/{1}", ""},
- {"/reg3grp", `(.*)/(.*)/(.*)`, "/{1}{2}{3}", ""},
- {"/hashtest", "(.*)", "/{1}", ""},
- }
-
- for _, regexpRule := range regexps {
- var ext []string
- if s := strings.Split(regexpRule[3], "|"); len(s) > 1 {
- ext = s[:len(s)-1]
- }
- rule, err := NewComplexRule(regexpRule[0], regexpRule[1], regexpRule[2], ext, httpserver.IfMatcher{})
- if err != nil {
- t.Fatal(err)
- }
- rw.Rules = append(rw.Rules, rule)
- }
-
- tests := []struct {
- from string
- expectedTo string
- }{
- {"/from", "/to"},
- {"/a", "/b"},
- {"/b", "/b/b"},
- {"/aa", "/aa"},
- {"/", "/"},
- {"/a?foo=bar", "/b?foo=bar"},
- {"/asdf?foo=bar", "/asdf?foo=bar"},
- {"/foo#bar", "/foo#bar"},
- {"/a#foo", "/b#foo"},
- {"/reg/foo", "/to"},
- {"/re", "/re"},
- {"/r/", "/r/"},
- {"/r/123", "/r/123"},
- {"/r/a123", "/toaz"},
- {"/r/abcz", "/toaz"},
- {"/r/z", "/toaz"},
- {"/r/z.html", "/r/z.html"},
- {"/r/z.js", "/toaz"},
- {"/path/a1b2c", "/to/path/a1b2c"},
- {"/path/d3e4f", "/to/path/d3e4f"},
- {"/url/asAB", "/to/url/asAB"},
- {"/url/aBsAB", "/url/aBsAB"},
- {"/url/a00sAB", "/to/url/a00sAB"},
- {"/url/a0z0sAB", "/to/url/a0z0sAB"},
- {"/ab/aa", "/ab/aa"},
- {"/ab/ab", "/ab/ab"},
- {"/ab/ab.txt", "/ab"},
- {"/ab/ab.txt?name=name", "/ab?name=name"},
- {"/ab/ab.html?name=name", "/ab?type=html&name=name"},
- {"/abc/ab.html", "/abc/ab.html"},
- {"/abcd/abcd.html", "/a/abcd/abcd.html"},
- {"/abcde/abcde.html", "/a"},
- {"/abcde/abcde.html#1234", "/a#1234"},
- {"/ab/ab.jpg", "/ajpg"},
- {"/reggrp/ad/12", "/a12/"},
- {"/reggrp/ad/124a", "/a124/a"},
- {"/reggrp/ad/124abc", "/a124/abc"},
- {"/reg2grp/ad/124abc", "/ad/124abc"},
- {"/reg3grp/ad/aa/66", "/adaa66"},
- {"/reg3grp/ad612/n1n/ab", "/ad612n1nab"},
- {"/hashtest/a%20%23%20test", "/a%20%23%20test"},
- {"/hashtest/a%20%3F%20test", "/a%20%3F%20test"},
- {"/hashtest/a%20%3F%23test", "/a%20%3F%23test"},
- }
-
- for i, test := range tests {
- req, err := http.NewRequest("GET", test.from, nil)
- if err != nil {
- t.Fatalf("Test %d: Could not create HTTP request: %v", i, err)
- }
- ctx := context.WithValue(req.Context(), httpserver.OriginalURLCtxKey, *req.URL)
- req = req.WithContext(ctx)
-
- rec := httptest.NewRecorder()
- rw.ServeHTTP(rec, req)
-
- if got, want := rec.Body.String(), test.expectedTo; got != want {
- t.Errorf("Test %d: Expected URL to be '%s' but was '%s'", i, want, got)
- }
- }
-}
-
-func urlPrinter(w http.ResponseWriter, r *http.Request) (int, error) {
- fmt.Fprint(w, r.URL.String())
- return 0, nil
-}
diff --git a/caddyhttp/rewrite/setup.go b/caddyhttp/rewrite/setup.go
deleted file mode 100644
index 71c498e1d68..00000000000
--- a/caddyhttp/rewrite/setup.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package rewrite
-
-import (
- "net/http"
- "strings"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("rewrite", caddy.Plugin{
- ServerType: "http",
- Action: setup,
- })
-}
-
-// setup configures a new Rewrite middleware instance.
-func setup(c *caddy.Controller) error {
- rewrites, err := rewriteParse(c)
- if err != nil {
- return err
- }
-
- cfg := httpserver.GetConfig(c)
-
- cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
- return Rewrite{
- Next: next,
- FileSys: http.Dir(cfg.Root),
- Rules: rewrites,
- }
- })
-
- return nil
-}
-
-func rewriteParse(c *caddy.Controller) ([]httpserver.HandlerConfig, error) {
- var rules []httpserver.HandlerConfig
-
- for c.Next() {
- var rule Rule
- var err error
- var base = "/"
- var pattern, to string
- var ext []string
-
- args := c.RemainingArgs()
-
- var matcher httpserver.RequestMatcher
-
- switch len(args) {
- case 1:
- base = args[0]
- fallthrough
- case 0:
- // Integrate request matcher for 'if' conditions.
- matcher, err = httpserver.SetupIfMatcher(c)
- if err != nil {
- return nil, err
- }
-
- for c.NextBlock() {
- if httpserver.IfMatcherKeyword(c) {
- continue
- }
- switch c.Val() {
- case "r", "regexp":
- if !c.NextArg() {
- return nil, c.ArgErr()
- }
- pattern = c.Val()
- case "to":
- args1 := c.RemainingArgs()
- if len(args1) == 0 {
- return nil, c.ArgErr()
- }
- to = strings.Join(args1, " ")
- case "ext":
- args1 := c.RemainingArgs()
- if len(args1) == 0 {
- return nil, c.ArgErr()
- }
- ext = args1
- default:
- return nil, c.ArgErr()
- }
- }
- // ensure to is specified
- if to == "" {
- return nil, c.ArgErr()
- }
- if rule, err = NewComplexRule(base, pattern, to, ext, matcher); err != nil {
- return nil, err
- }
- rules = append(rules, rule)
-
- // the only unhandled case is 2 and above
- default:
- rule = NewSimpleRule(args[0], strings.Join(args[1:], " "))
- rules = append(rules, rule)
- }
-
- }
-
- return rules, nil
-}
diff --git a/caddyhttp/rewrite/setup_test.go b/caddyhttp/rewrite/setup_test.go
deleted file mode 100644
index 4e32c2eed64..00000000000
--- a/caddyhttp/rewrite/setup_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package rewrite
-
-import (
- "fmt"
- "regexp"
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestSetup(t *testing.T) {
- c := caddy.NewTestController("http", `rewrite /from /to`)
- err := setup(c)
- if err != nil {
- t.Errorf("Expected no errors, but got: %v", err)
- }
- mids := httpserver.GetConfig(c).Middleware()
- if len(mids) == 0 {
- t.Fatal("Expected middleware, had 0 instead")
- }
-
- handler := mids[0](httpserver.EmptyNext)
- myHandler, ok := handler.(Rewrite)
- if !ok {
- t.Fatalf("Expected handler to be type Rewrite, got: %#v", handler)
- }
-
- if !httpserver.SameNext(myHandler.Next, httpserver.EmptyNext) {
- t.Error("'Next' field of handler was not set properly")
- }
-
- if len(myHandler.Rules) != 1 {
- t.Errorf("Expected handler to have %d rule, has %d instead", 1, len(myHandler.Rules))
- }
-}
-
-func TestRewriteParse(t *testing.T) {
- simpleTests := []struct {
- input string
- shouldErr bool
- expected []Rule
- }{
- {`rewrite /from /to`, false, []Rule{
- SimpleRule{From: "/from", To: "/to"},
- }},
- {`rewrite /from /to
- rewrite a b`, false, []Rule{
- SimpleRule{From: "/from", To: "/to"},
- SimpleRule{From: "a", To: "b"},
- }},
- {`rewrite a`, true, []Rule{}},
- {`rewrite`, true, []Rule{}},
- {`rewrite a b c`, false, []Rule{
- SimpleRule{From: "a", To: "b c"},
- }},
- }
-
- for i, test := range simpleTests {
- actual, err := rewriteParse(caddy.NewTestController("http", test.input))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- } else if err != nil && test.shouldErr {
- continue
- }
-
- if len(actual) != len(test.expected) {
- t.Fatalf("Test %d expected %d rules, but got %d",
- i, len(test.expected), len(actual))
- }
-
- for j, e := range test.expected {
- actualRule := actual[j].(SimpleRule)
- expectedRule := e.(SimpleRule)
-
- if actualRule.From != expectedRule.From {
- t.Errorf("Test %d, rule %d: Expected From=%s, got %s",
- i, j, expectedRule.From, actualRule.From)
- }
-
- if actualRule.To != expectedRule.To {
- t.Errorf("Test %d, rule %d: Expected To=%s, got %s",
- i, j, expectedRule.To, actualRule.To)
- }
- }
- }
-
- regexpTests := []struct {
- input string
- shouldErr bool
- expected []Rule
- }{
- {`rewrite {
- r .*
- to /to /index.php?
- }`, false, []Rule{
- ComplexRule{Base: "/", To: "/to /index.php?", Regexp: regexp.MustCompile(".*")},
- }},
- {`rewrite {
- regexp .*
- to /to
- ext / html txt
- }`, false, []Rule{
- ComplexRule{Base: "/", To: "/to", Exts: []string{"/", "html", "txt"}, Regexp: regexp.MustCompile(".*")},
- }},
- {`rewrite /path {
- r rr
- to /dest
- }
- rewrite / {
- regexp [a-z]+
- to /to /to2
- }
- `, false, []Rule{
- ComplexRule{Base: "/path", To: "/dest", Regexp: regexp.MustCompile("rr")},
- ComplexRule{Base: "/", To: "/to /to2", Regexp: regexp.MustCompile("[a-z]+")},
- }},
- {`rewrite {
- r .*
- }`, true, []Rule{
- ComplexRule{},
- }},
- {`rewrite {
-
- }`, true, []Rule{
- ComplexRule{},
- }},
- {`rewrite /`, true, []Rule{
- ComplexRule{},
- }},
- {`rewrite {
- if {path} match /
- to /to
- }`, false, []Rule{
- ComplexRule{Base: "/", To: "/to"},
- }},
- }
-
- for i, test := range regexpTests {
- actual, err := rewriteParse(caddy.NewTestController("http", test.input))
-
- if err == nil && test.shouldErr {
- t.Errorf("Test %d didn't error, but it should have", i)
- } else if err != nil && !test.shouldErr {
- t.Errorf("Test %d errored, but it shouldn't have; got '%v'", i, err)
- } else if err != nil && test.shouldErr {
- continue
- }
-
- if len(actual) != len(test.expected) {
- t.Fatalf("Test %d expected %d rules, but got %d",
- i, len(test.expected), len(actual))
- }
-
- for j, e := range test.expected {
- actualRule := actual[j].(ComplexRule)
- expectedRule := e.(ComplexRule)
-
- if actualRule.Base != expectedRule.Base {
- t.Errorf("Test %d, rule %d: Expected Base=%s, got %s",
- i, j, expectedRule.Base, actualRule.Base)
- }
-
- if actualRule.To != expectedRule.To {
- t.Errorf("Test %d, rule %d: Expected To=%s, got %s",
- i, j, expectedRule.To, actualRule.To)
- }
-
- if fmt.Sprint(actualRule.Exts) != fmt.Sprint(expectedRule.Exts) {
- t.Errorf("Test %d, rule %d: Expected Ext=%v, got %v",
- i, j, expectedRule.To, actualRule.To)
- }
-
- if actualRule.Regexp != nil {
- if actualRule.Regexp.String() != expectedRule.Regexp.String() {
- t.Errorf("Test %d, rule %d: Expected Pattern=%s, got %s",
- i, j, actualRule.Regexp.String(), expectedRule.Regexp.String())
- }
- }
- }
-
- if rules_fmt := fmt.Sprintf("%v", actual); strings.HasPrefix(rules_fmt, "%!") {
- t.Errorf("Test %d: Failed to string encode: %#v", i, rules_fmt)
- }
- }
-}
diff --git a/caddyhttp/rewrite/testdata/testfile b/caddyhttp/rewrite/testdata/testfile
deleted file mode 100644
index 7b4d68d70fc..00000000000
--- a/caddyhttp/rewrite/testdata/testfile
+++ /dev/null
@@ -1 +0,0 @@
-empty
\ No newline at end of file
diff --git a/caddyhttp/rewrite/to.go b/caddyhttp/rewrite/to.go
deleted file mode 100644
index 6e296dc9c03..00000000000
--- a/caddyhttp/rewrite/to.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package rewrite
-
-import (
- "log"
- "net/http"
- "net/url"
- "path"
- "strings"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-// To attempts rewrite. It attempts to rewrite to first valid path
-// or the last path if none of the paths are valid.
-func To(fs http.FileSystem, r *http.Request, to string, replacer httpserver.Replacer) Result {
- tos := strings.Fields(to)
-
- // try each rewrite paths
- t := ""
- query := ""
- for _, v := range tos {
- t = replacer.Replace(v)
- tparts := strings.SplitN(t, "?", 2)
- t = path.Clean(tparts[0])
-
- if len(tparts) > 1 {
- query = tparts[1]
- }
-
- // add trailing slash for directories, if present
- if strings.HasSuffix(tparts[0], "/") && !strings.HasSuffix(t, "/") {
- t += "/"
- }
-
- // validate file
- if validFile(fs, t) {
- break
- }
- }
-
- // validate resulting path
- u, err := url.Parse(t)
- if err != nil {
- // Let the user know we got here. Rewrite is expected but
- // the resulting url is invalid.
- log.Printf("[ERROR] rewrite: resulting path '%v' is invalid. error: %v", t, err)
- return RewriteIgnored
- }
-
- // perform rewrite
- r.URL.Path = u.Path
- if query != "" {
- // overwrite query string if present
- r.URL.RawQuery = query
- }
- if u.Fragment != "" {
- // overwrite fragment if present
- r.URL.Fragment = u.Fragment
- }
-
- return RewriteDone
-}
-
-// validFile checks if file exists on the filesystem.
-// if file ends with `/`, it is validated as a directory.
-func validFile(fs http.FileSystem, file string) bool {
- if fs == nil {
- return false
- }
-
- f, err := fs.Open(file)
- if err != nil {
- return false
- }
- defer f.Close()
-
- stat, err := f.Stat()
- if err != nil {
- return false
- }
-
- // directory
- if strings.HasSuffix(file, "/") {
- return stat.IsDir()
- }
-
- // file
- return !stat.IsDir()
-}
diff --git a/caddyhttp/rewrite/to_test.go b/caddyhttp/rewrite/to_test.go
deleted file mode 100644
index e809eec9d81..00000000000
--- a/caddyhttp/rewrite/to_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package rewrite
-
-import (
- "context"
- "net/http"
- "net/url"
- "testing"
-
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestTo(t *testing.T) {
- fs := http.Dir("testdata")
- tests := []struct {
- url string
- to string
- expected string
- }{
- {"/", "/somefiles", "/somefiles"},
- {"/somefiles", "/somefiles /index.php{uri}", "/index.php/somefiles"},
- {"/somefiles", "/testfile /index.php{uri}", "/testfile"},
- {"/somefiles", "/testfile/ /index.php{uri}", "/index.php/somefiles"},
- {"/somefiles", "/somefiles /index.php{uri}", "/index.php/somefiles"},
- {"/?a=b", "/somefiles /index.php?{query}", "/index.php?a=b"},
- {"/?a=b", "/testfile /index.php?{query}", "/testfile?a=b"},
- {"/?a=b", "/testdir /index.php?{query}", "/index.php?a=b"},
- {"/?a=b", "/testdir/ /index.php?{query}", "/testdir/?a=b"},
- {"/test?url=http://", " /p/{path}?{query}", "/p/test?url=http://"},
- {"/test?url=http://", " /p/{rewrite_path}?{query}", "/p/test?url=http://"},
- {"/test/?url=http://", " /{uri}", "/test/?url=http://"},
- }
-
- uri := func(r *url.URL) string {
- uri := r.Path
- if r.RawQuery != "" {
- uri += "?" + r.RawQuery
- }
- return uri
- }
- for i, test := range tests {
- r, err := http.NewRequest("GET", test.url, nil)
- if err != nil {
- t.Error(err)
- }
- ctx := context.WithValue(r.Context(), httpserver.OriginalURLCtxKey, *r.URL)
- r = r.WithContext(ctx)
- To(fs, r, test.to, newReplacer(r))
- if uri(r.URL) != test.expected {
- t.Errorf("Test %v: expected %v found %v", i, test.expected, uri(r.URL))
- }
- }
-}
diff --git a/caddyhttp/root/root.go b/caddyhttp/root/root.go
deleted file mode 100644
index f9f11bfa2dd..00000000000
--- a/caddyhttp/root/root.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package root
-
-import (
- "log"
- "os"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func init() {
- caddy.RegisterPlugin("root", caddy.Plugin{
- ServerType: "http",
- Action: setupRoot,
- })
-}
-
-func setupRoot(c *caddy.Controller) error {
- config := httpserver.GetConfig(c)
-
- for c.Next() {
- if !c.NextArg() {
- return c.ArgErr()
- }
- config.Root = c.Val()
- if c.NextArg() {
- // only one argument allowed
- return c.ArgErr()
- }
- }
- //first check that the path is not a symlink, os.Stat panics when this is true
- info, _ := os.Lstat(config.Root)
- if info != nil && info.Mode()&os.ModeSymlink == os.ModeSymlink {
- //just print out info, delegate responsibility for symlink validity to
- //underlying Go framework, no need to test / verify twice
- log.Printf("[INFO] Root path is symlink: %s", config.Root)
- } else {
- // Check if root path exists
- _, err := os.Stat(config.Root)
- if err != nil {
- if os.IsNotExist(err) {
- // Allow this, because the folder might appear later.
- // But make sure the user knows!
- log.Printf("[WARNING] Root path does not exist: %s", config.Root)
- } else {
- return c.Errf("Unable to access root path '%s': %v", config.Root, err)
- }
- }
- }
-
- return nil
-}
diff --git a/caddyhttp/root/root_test.go b/caddyhttp/root/root_test.go
deleted file mode 100644
index e4ad8841bf8..00000000000
--- a/caddyhttp/root/root_test.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package root
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "github.com/mholt/caddy"
- "github.com/mholt/caddy/caddyhttp/httpserver"
-)
-
-func TestRoot(t *testing.T) {
- // Predefined error substrings
- parseErrContent := "Parse error:"
- unableToAccessErrContent := "Unable to access root path"
-
- existingDirPath, err := getTempDirPath()
- if err != nil {
- t.Fatalf("BeforeTest: Failed to find an existing directory for testing! Error was: %v", err)
- }
-
- nonExistingDir := filepath.Join(existingDirPath, "highly_unlikely_to_exist_dir")
-
- existingFile, err := ioutil.TempFile("", "root_test")
- if err != nil {
- t.Fatalf("BeforeTest: Failed to create temp file for testing! Error was: %v", err)
- }
- defer func() {
- existingFile.Close()
- os.Remove(existingFile.Name())
- }()
-
- inaccessiblePath := getInaccessiblePath(existingFile.Name())
-
- tests := []struct {
- input string
- shouldErr bool
- expectedRoot string // expected root, set to the controller. Empty for negative cases.
- expectedErrContent string // substring from the expected error. Empty for positive cases.
- }{
- // positive
- {
- fmt.Sprintf(`root %s`, nonExistingDir), false, nonExistingDir, "",
- },
- {
- fmt.Sprintf(`root %s`, existingDirPath), false, existingDirPath, "",
- },
- // negative
- {
- `root `, true, "", parseErrContent,
- },
- {
- `root /a /b`, true, "", parseErrContent,
- },
- {
- fmt.Sprintf(`root %s`, inaccessiblePath), true, "", unableToAccessErrContent,
- },
- {
- fmt.Sprintf(`root {
- %s
- }`, existingDirPath), true, "", parseErrContent,
- },
- }
-
- for i, test := range tests {
- c := caddy.NewTestController("http", test.input)
- err := setupRoot(c)
- cfg := httpserver.GetConfig(c)
-
- if test.shouldErr && err == nil {
- t.Errorf("Test %d: Expected error but got nil for input '%s'", i, test.input)
- }
-
- if err != nil {
- if !test.shouldErr {
- t.Errorf("Test %d: Expected no error but found one for input %s. Error was: %v", i, test.input, err)
- }
-
- if !strings.Contains(err.Error(), test.expectedErrContent) {
- t.Errorf("Test %d: Expected error to contain: %v, found error: %v, input: %s", i, test.expectedErrContent, err, test.input)
- }
- }
-
- // check root only if we are in a positive test.
- if !test.shouldErr && test.expectedRoot != cfg.Root {
- t.Errorf("Root not correctly set for input %s. Expected: %s, actual: %s", test.input, test.expectedRoot, cfg.Root)
- }
- }
-}
-
-// getTempDirPath returns the path to the system temp directory. If it does not exists - an error is returned.
-func getTempDirPath() (string, error) {
- tempDir := os.TempDir()
- _, err := os.Stat(tempDir)
- if err != nil {
- return "", err
- }
- return tempDir, nil
-}
-
-func getInaccessiblePath(file string) string {
- return filepath.Join("C:", "file\x00name") // null byte in filename is not allowed on Windows AND unix
-}
-
-func TestSymlinkRoot(t *testing.T) {
- origDir, err := ioutil.TempDir("", "root_test")
- if err != nil {
- t.Fatalf("BeforeTest: Failed to create temp dir for testing! Error was: %v", err)
- }
- defer func() {
- os.Remove(origDir)
- }()
-
- tempDir, err := getTempDirPath()
- if err != nil {
- t.Fatalf("BeforeTest: Failed to find an existing directory for testing! Error was: %v", err)
- }
- symlinkDir := filepath.Join(tempDir, "symlink")
-
- err = os.Symlink(origDir, symlinkDir)
- if err != nil {
- if strings.Contains(err.Error(), "A required privilege is not held by the client") {
- t.Skip("BeforeTest: A required privilege is not held by the client and is required to create a symlink to run this test.")
- }
- t.Fatalf("BeforeTest: Cannot create symlink! Error was: %v", err)
- }
- defer func() {
- os.Remove(symlinkDir)
- }()
-
- input := fmt.Sprintf(`root %s`, symlinkDir)
- c := caddy.NewTestController("http", input)
- err = setupRoot(c)
- _ = httpserver.GetConfig(c)
-
- if err != nil {
- t.Errorf("Test Symlink Root: Expected no error but found one for input %s. Error was: %v", input, err)
- }
-}
diff --git a/caddyhttp/staticfiles/fileserver.go b/caddyhttp/staticfiles/fileserver.go
deleted file mode 100644
index a66e1b13605..00000000000
--- a/caddyhttp/staticfiles/fileserver.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Package staticfiles provides middleware for serving static files from disk.
-// Its handler is the default HTTP handler for the HTTP server.
-//
-// TODO: Should this package be rolled into the httpserver package?
-package staticfiles
-
-import (
- "math/rand"
- "net/http"
- "os"
- "path"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
-
- "github.com/mholt/caddy"
-)
-
-// FileServer implements a production-ready file server
-// and is the 'default' handler for all requests to Caddy.
-// It simply loads and serves the URI requested. FileServer
-// is adapted from the one in net/http by the Go authors.
-// Significant modifications have been made.
-//
-// Original license:
-//
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-type FileServer struct {
- Root http.FileSystem // jailed access to the file system
- Hide []string // list of files for which to respond with "Not Found"
-}
-
-// ServeHTTP serves static files for r according to fs's configuration.
-func (fs FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
- return fs.serveFile(w, r)
-}
-
-// serveFile writes the specified file to the HTTP response.
-// name is '/'-separated, not filepath.Separator.
-func (fs FileServer) serveFile(w http.ResponseWriter, r *http.Request) (int, error) {
- reqPath := r.URL.Path
-
- // Prevent absolute path access on Windows.
- // TODO remove when stdlib http.Dir fixes this.
- if runtime.GOOS == "windows" && len(reqPath) > 0 && filepath.IsAbs(reqPath[1:]) {
- return http.StatusNotFound, nil
- }
-
- // open the requested file
- f, err := fs.Root.Open(reqPath)
- if err != nil {
- // TODO: remove when http.Dir handles this (Go 1.9?)
- // Go issue #18984
- err = mapFSRootOpenErr(err)
- if os.IsNotExist(err) {
- return http.StatusNotFound, nil
- } else if os.IsPermission(err) {
- return http.StatusForbidden, err
- }
- // otherwise, maybe the server is under load and ran out of file descriptors?
- backoff := int(3 + rand.Int31()%3) // 3–5 seconds to prevent a stampede
- w.Header().Set("Retry-After", strconv.Itoa(backoff))
- return http.StatusServiceUnavailable, err
- }
- defer f.Close()
-
- // get information about the file
- d, err := f.Stat()
- if err != nil {
- if os.IsNotExist(err) {
- return http.StatusNotFound, nil
- } else if os.IsPermission(err) {
- return http.StatusForbidden, err
- }
- // return a different status code than above to distinguish these cases
- return http.StatusInternalServerError, err
- }
-
- // redirect to canonical path (being careful to preserve other parts of URL and
- // considering cases where a site is defined with a path prefix that gets stripped)
- urlCopy := *r.URL
- pathPrefix, _ := r.Context().Value(caddy.CtxKey("path_prefix")).(string)
- if pathPrefix != "/" {
- urlCopy.Path = pathPrefix + urlCopy.Path
- }
- if urlCopy.Path == "" {
- urlCopy.Path = "/"
- }
- if d.IsDir() {
- // ensure there is a trailing slash
- if urlCopy.Path[len(urlCopy.Path)-1] != '/' {
- urlCopy.Path += "/"
- http.Redirect(w, r, urlCopy.String(), http.StatusMovedPermanently)
- return http.StatusMovedPermanently, nil
- }
- } else {
- // ensure no trailing slash
- redir := false
- if urlCopy.Path[len(urlCopy.Path)-1] == '/' {
- urlCopy.Path = urlCopy.Path[:len(urlCopy.Path)-1]
- redir = true
- }
-
- // if an index file was explicitly requested, strip file name from the request
- // ("/foo/index.html" -> "/foo/")
- var requestPage = path.Base(urlCopy.Path)
- for _, indexPage := range IndexPages {
- if requestPage == indexPage {
- urlCopy.Path = urlCopy.Path[:len(urlCopy.Path)-len(indexPage)]
- redir = true
- break
- }
- }
-
- if redir {
- http.Redirect(w, r, urlCopy.String(), http.StatusMovedPermanently)
- return http.StatusMovedPermanently, nil
- }
- }
-
- // use contents of an index file, if present, for directory requests
- if d.IsDir() {
- for _, indexPage := range IndexPages {
- indexPath := path.Join(reqPath, indexPage)
- indexFile, err := fs.Root.Open(indexPath)
- if err != nil {
- continue
- }
-
- indexInfo, err := indexFile.Stat()
- if err != nil {
- indexFile.Close()
- continue
- }
-
- // this defer does not leak fds even though we are in a loop,
- // because previous iterations of the loop must have had an
- // err, so there's nothing to close from earlier iterations.
- defer indexFile.Close()
-
- // close previously-opened file immediately to release fd
- f.Close()
-
- // switch to using the index file, and we're done here
- d = indexInfo
- f = indexFile
- reqPath = indexPath
- break
- }
- }
-
- // return Not Found if we either did not find an index file (and thus are
- // still a directory) or if this file is supposed to be hidden
- if d.IsDir() || fs.IsHidden(d) {
- return http.StatusNotFound, nil
- }
-
- etag := calculateEtag(d)
-
- // look for compressed versions of the file on disk, if the client supports that encoding
- for _, encoding := range staticEncodingPriority {
- // see if the client accepts a compressed encoding we offer
- acceptEncoding := strings.Split(r.Header.Get("Accept-Encoding"), ",")
- accepted := false
- for _, acc := range acceptEncoding {
- if strings.TrimSpace(acc) == encoding {
- accepted = true
- break
- }
- }
-
- // if client doesn't support this encoding, don't even bother; try next one
- if !accepted {
- continue
- }
-
- // see if the compressed version of this file exists
- encodedFile, err := fs.Root.Open(reqPath + staticEncoding[encoding])
- if err != nil {
- continue
- }
-
- encodedFileInfo, err := encodedFile.Stat()
- if err != nil {
- encodedFile.Close()
- continue
- }
-
- // close the encoded file when we're done, and close the
- // previously-opened file immediately to release the fd
- defer encodedFile.Close()
- f.Close()
-
- // the encoded file is now what we're serving
- f = encodedFile
- etag = calculateEtag(encodedFileInfo)
- w.Header().Add("Vary", "Accept-Encoding")
- w.Header().Set("Content-Encoding", encoding)
- w.Header().Set("Content-Length", strconv.FormatInt(encodedFileInfo.Size(), 10))
- break
- }
-
- // Set the ETag returned to the user-agent. Note that a conditional If-None-Match
- // request is handled in http.ServeContent below, which checks against this ETag value.
- w.Header().Set("ETag", etag)
-
- // Note: Errors generated by ServeContent are written immediately
- // to the response. This usually only happens if seeking fails (rare).
- // Its signature does not bubble the error up to us, so we cannot
- // return it for any logging middleware to record. Oh well.
- http.ServeContent(w, r, d.Name(), d.ModTime(), f)
-
- return http.StatusOK, nil
-}
-
-// IsHidden checks if file with FileInfo d is on hide list.
-func (fs FileServer) IsHidden(d os.FileInfo) bool {
- for _, hiddenPath := range fs.Hide {
- // TODO: Could these FileInfos be stored instead of their paths, to avoid opening them all the time?
- if hFile, err := fs.Root.Open(hiddenPath); err == nil {
- fs, _ := hFile.Stat()
- hFile.Close()
- if os.SameFile(d, fs) {
- return true
- }
- }
- }
- return false
-}
-
-// calculateEtag produces a strong etag by default, although, for
-// efficiency reasons, it does not actually consume the contents
-// of the file to make a hash of all the bytes. ¯\_(ツ)_/¯
-// Prefix the etag with "W/" to convert it into a weak etag.
-// See: https://tools.ietf.org/html/rfc7232#section-2.3
-func calculateEtag(d os.FileInfo) string {
- t := strconv.FormatInt(d.ModTime().Unix(), 36)
- s := strconv.FormatInt(d.Size(), 36)
- return `"` + t + s + `"`
-}
-
-// IndexPages is a list of pages that may be understood as
-// the "index" files to directories.
-var IndexPages = []string{
- "index.html",
- "index.htm",
- "index.txt",
- "default.html",
- "default.htm",
- "default.txt",
-}
-
-// staticEncoding is a map of content-encoding to a file extension.
-// If client accepts given encoding (via Accept-Encoding header) and compressed file with given extensions exists
-// it will be served to the client instead of original one.
-var staticEncoding = map[string]string{
- "gzip": ".gz",
- "br": ".br",
-}
-
-// staticEncodingPriority is a list of preferred static encodings (most efficient compression to least one).
-var staticEncodingPriority = []string{
- "br",
- "gzip",
-}
-
-// mapFSRootOpenErr maps the provided non-nil error
-// to a possibly better non-nil error. In particular, it turns OS-specific errors
-// about opening files in non-directories into os.ErrNotExist.
-//
-// TODO: remove when http.Dir handles this (slated for Go 1.9)
-// Go issue #18984
-func mapFSRootOpenErr(originalErr error) error {
- if os.IsNotExist(originalErr) || os.IsPermission(originalErr) {
- return originalErr
- }
-
- perr, ok := originalErr.(*os.PathError)
- if !ok {
- return originalErr
- }
- name := perr.Path
- parts := strings.Split(name, string(filepath.Separator))
- for i := range parts {
- if parts[i] == "" {
- continue
- }
- fi, err := os.Stat(strings.Join(parts[:i+1], string(filepath.Separator)))
- if err != nil {
- return originalErr
- }
- if !fi.IsDir() {
- return os.ErrNotExist
- }
- }
- return originalErr
-}
diff --git a/caddyhttp/staticfiles/fileserver_test.go b/caddyhttp/staticfiles/fileserver_test.go
deleted file mode 100644
index 715915b3c29..00000000000
--- a/caddyhttp/staticfiles/fileserver_test.go
+++ /dev/null
@@ -1,596 +0,0 @@
-package staticfiles
-
-import (
- "context"
- "errors"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/mholt/caddy"
-)
-
-// TestServeHTTP covers positive scenarios when serving files.
-func TestServeHTTP(t *testing.T) {
- tmpWebRootDir := beforeServeHTTPTest(t)
- defer afterServeHTTPTest(t, tmpWebRootDir)
-
- fileserver := FileServer{
- Root: http.Dir(filepath.Join(tmpWebRootDir, webrootName)),
- Hide: []string{"dir/hidden.html"},
- }
-
- movedPermanently := "Moved Permanently"
-
- tests := []struct {
- url string
- stripPathPrefix string // for when sites are defined with a path (e.g. "example.com/foo/")
- acceptEncoding string
- expectedLocation string
- expectedStatus int
- expectedBodyContent string
- expectedEtag string
- expectedVary string
- expectedEncoding string
- expectedContentLength string
- }{
- // Test 0 - access without any path
- {
- url: "https://foo",
- expectedStatus: http.StatusNotFound,
- },
- // Test 1 - access root (without index.html)
- {
- url: "https://foo/",
- expectedStatus: http.StatusNotFound,
- },
- // Test 2 - access existing file
- {
- url: "https://foo/file1.html",
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootFile1HTML],
- expectedEtag: `"2n9cj"`,
- expectedContentLength: strconv.Itoa(len(testFiles[webrootFile1HTML])),
- },
- // Test 3 - access folder with index file with trailing slash
- {
- url: "https://foo/dirwithindex/",
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootDirwithindexIndeHTML],
- expectedEtag: `"2n9cw"`,
- expectedContentLength: strconv.Itoa(len(testFiles[webrootDirwithindexIndeHTML])),
- },
- // Test 4 - access folder with index file without trailing slash
- {
- url: "https://foo/dirwithindex",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/dirwithindex/",
- expectedBodyContent: movedPermanently,
- },
- // Test 5 - access folder without index file
- {
- url: "https://foo/dir/",
- expectedStatus: http.StatusNotFound,
- },
- // Test 6 - access folder without trailing slash
- {
- url: "https://foo/dir",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/dir/",
- expectedBodyContent: movedPermanently,
- },
- // Test 7 - access file with trailing slash
- {
- url: "https://foo/file1.html/",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/file1.html",
- expectedBodyContent: movedPermanently,
- },
- // Test 8 - access not existing path
- {
- url: "https://foo/not_existing",
- expectedStatus: http.StatusNotFound,
- },
- // Test 9 - access a file, marked as hidden
- {
- url: "https://foo/dir/hidden.html",
- expectedStatus: http.StatusNotFound,
- },
- // Test 10 - access an index file directly
- {
- url: "https://foo/dirwithindex/index.html",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/dirwithindex/",
- },
- // Test 11 - access an index file with a trailing slash
- {
- url: "https://foo/dirwithindex/index.html/",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/dirwithindex/",
- },
- // Test 12 - send a request with query params
- {
- url: "https://foo/dir?param1=val",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/dir/?param1=val",
- expectedBodyContent: movedPermanently,
- },
- // Test 13 - attempt to bypass hidden file
- {
- url: "https://foo/dir/hidden.html%20",
- expectedStatus: http.StatusNotFound,
- },
- // Test 14 - attempt to bypass hidden file
- {
- url: "https://foo/dir/hidden.html.",
- expectedStatus: http.StatusNotFound,
- },
- // Test 15 - attempt to bypass hidden file
- {
- url: "https://foo/dir/hidden.html.%20",
- expectedStatus: http.StatusNotFound,
- },
- // Test 16 - attempt to bypass hidden file
- {
- url: "https://foo/dir/hidden.html%20.",
- acceptEncoding: "br, gzip",
- expectedStatus: http.StatusNotFound,
- },
- // Test 17 - serve another file with same name as hidden file.
- {
- url: "https://foo/hidden.html",
- expectedStatus: http.StatusNotFound,
- },
- // Test 18 - try to get below the root directory.
- {
- url: "https://foo/../unreachable.html",
- expectedStatus: http.StatusNotFound,
- },
- // Test 19 - try to get below the root directory (encoded slashes).
- {
- url: "https://foo/..%2funreachable.html",
- expectedStatus: http.StatusNotFound,
- },
- // Test 20 - try to get pre-gzipped file.
- {
- url: "https://foo/sub/gzipped.html",
- acceptEncoding: "gzip",
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootSubGzippedHTMLGz],
- expectedEtag: `"2n9ch"`,
- expectedVary: "Accept-Encoding",
- expectedEncoding: "gzip",
- expectedContentLength: strconv.Itoa(len(testFiles[webrootSubGzippedHTMLGz])),
- },
- // Test 21 - try to get pre-brotli encoded file.
- {
- url: "https://foo/sub/brotli.html",
- acceptEncoding: "br,gzip",
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootSubBrotliHTMLBr],
- expectedEtag: `"2n9cg"`,
- expectedVary: "Accept-Encoding",
- expectedEncoding: "br",
- expectedContentLength: strconv.Itoa(len(testFiles[webrootSubBrotliHTMLBr])),
- },
- // Test 22 - not allowed to get pre-brotli encoded file.
- {
- url: "https://foo/sub/brotli.html",
- acceptEncoding: "nicebrew", // contains "br" substring but not "br"
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootSubBrotliHTML],
- expectedEtag: `"2n9cd"`,
- expectedVary: "",
- expectedEncoding: "",
- expectedContentLength: strconv.Itoa(len(testFiles[webrootSubBrotliHTML])),
- },
- // Test 23 - treat existing file as a directory.
- {
- url: "https://foo/file1.html/other",
- expectedStatus: http.StatusNotFound,
- },
- // Test 24 - access folder with index file without trailing slash, with stripped path
- {
- url: "https://foo/bar/dirwithindex",
- stripPathPrefix: "/bar/",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/bar/dirwithindex/",
- expectedBodyContent: movedPermanently,
- },
- // Test 25 - access folder with index file without trailing slash, with stripped path and query params
- {
- url: "https://foo/bar/dirwithindex?param1=val",
- stripPathPrefix: "/bar/",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/bar/dirwithindex/?param1=val",
- expectedBodyContent: movedPermanently,
- },
- // Test 26 - site defined with path ("bar"), which has that prefix stripped
- {
- url: "https://foo/bar/file1.html/",
- stripPathPrefix: "/bar/",
- expectedStatus: http.StatusMovedPermanently,
- expectedLocation: "https://foo/bar/file1.html",
- expectedBodyContent: movedPermanently,
- },
- {
- url: "https://foo/notindex.html",
- expectedStatus: http.StatusOK,
- expectedBodyContent: testFiles[webrootNotIndexHTML],
- expectedEtag: `"2n9cm"`,
- expectedContentLength: strconv.Itoa(len(testFiles[webrootNotIndexHTML])),
- },
- }
-
- for i, test := range tests {
- // set up response writer and rewuest
- responseRecorder := httptest.NewRecorder()
- request, err := http.NewRequest("GET", test.url, nil)
- if err != nil {
- t.Errorf("Test %d: Error making request: %v", i, err)
- continue
- }
-
- // set the original URL and path prefix on the context
- ctx := context.WithValue(request.Context(), caddy.CtxKey("original_url"), *request.URL)
- request = request.WithContext(ctx)
- ctx = context.WithValue(request.Context(), caddy.CtxKey("path_prefix"), test.stripPathPrefix)
- request = request.WithContext(ctx)
-
- request.Header.Add("Accept-Encoding", test.acceptEncoding)
-
- // simulate cases where a site is defined with a path prefix (e.g. "localhost/foo/")
- if test.stripPathPrefix != "" {
- request.URL.Path = strings.TrimPrefix(request.URL.Path, test.stripPathPrefix)
- }
-
- // perform the test
- status, err := fileserver.ServeHTTP(responseRecorder, request)
- etag := responseRecorder.Header().Get("Etag")
- body := responseRecorder.Body.String()
- vary := responseRecorder.Header().Get("Vary")
- encoding := responseRecorder.Header().Get("Content-Encoding")
- length := responseRecorder.Header().Get("Content-Length")
-
- // check if error matches expectations
- if err != nil {
- t.Errorf("Test %d: Serving file at %s failed. Error was: %v", i, test.url, err)
- }
-
- // check status code
- if test.expectedStatus != status {
- t.Errorf("Test %d: Expected status %d, found %d", i, test.expectedStatus, status)
- }
-
- // check etag
- if test.expectedEtag != etag {
- t.Errorf("Test %d: Expected Etag header %s, found %s", i, test.expectedEtag, etag)
- }
-
- // check vary
- if test.expectedVary != vary {
- t.Errorf("Test %d: Expected Vary header %s, found %s", i, test.expectedVary, vary)
- }
-
- // check content-encoding
- if test.expectedEncoding != encoding {
- t.Errorf("Test %d: Expected Content-Encoding header %s, found %s", i, test.expectedEncoding, encoding)
- }
-
- // check body content
- if !strings.Contains(body, test.expectedBodyContent) {
- t.Errorf("Test %d: Expected body to contain %q, found %q", i, test.expectedBodyContent, body)
- }
-
- // check Location header
- if test.expectedLocation != "" {
- l := responseRecorder.Header().Get("Location")
- if test.expectedLocation != l {
- t.Errorf("Test %d: Expected Location header %q, found %q", i, test.expectedLocation, l)
- }
- }
-
- // check content length
- if test.expectedContentLength != length {
- t.Errorf("Test %d: Expected Content-Length header %s, found %s", i, test.expectedContentLength, length)
- }
- }
-
-}
-
-// beforeServeHTTPTest creates a test directory with the structure, defined in the variable testFiles
-func beforeServeHTTPTest(t *testing.T) string {
- tmpdir, err := ioutil.TempDir("", testDirPrefix)
- if err != nil {
- t.Fatalf("failed to create test directory: %v", err)
- }
-
- fixedTime := time.Unix(123456, 0)
-
- for relFile, fileContent := range testFiles {
- absFile := filepath.Join(tmpdir, relFile)
-
- // make sure the parent directories exist
- parentDir := filepath.Dir(absFile)
- _, err = os.Stat(parentDir)
- if err != nil {
- os.MkdirAll(parentDir, os.ModePerm)
- }
-
- // now create the test files
- f, err := os.Create(absFile)
- if err != nil {
- t.Fatalf("Failed to create test file %s. Error was: %v", absFile, err)
- }
-
- // and fill them with content
- _, err = f.WriteString(fileContent)
- if err != nil {
- t.Fatalf("Failed to write to %s. Error was: %v", absFile, err)
- }
- f.Close()
-
- // and set the last modified time
- err = os.Chtimes(absFile, fixedTime, fixedTime)
- if err != nil {
- t.Fatalf("Failed to set file time to %s. Error was: %v", fixedTime, err)
- }
- }
-
- return tmpdir
-}
-
-// afterServeHTTPTest removes the test dir and all its content
-func afterServeHTTPTest(t *testing.T, webroot string) {
- if !strings.Contains(webroot, testDirPrefix) {
- t.Fatalf("Cannot clean up after test because webroot is: %s", webroot)
- }
- // cleans up everything under the test dir. No need to clean the individual files.
- err := os.RemoveAll(webroot)
- if err != nil {
- t.Fatalf("Failed to clean up test dir %s. Error was: %v", webroot, err)
- }
-}
-
-// failingFS implements the http.FileSystem interface. The Open method always returns the error, assigned to err
-type failingFS struct {
- err error // the error to return when Open is called
- fileImpl http.File // inject the file implementation
-}
-
-// Open returns the assigned failingFile and error
-func (f failingFS) Open(path string) (http.File, error) {
- return f.fileImpl, f.err
-}
-
-// failingFile implements http.File but returns a predefined error on every Stat() method call.
-type failingFile struct {
- http.File
- err error
-}
-
-// Stat returns nil FileInfo and the provided error on every call
-func (ff failingFile) Stat() (os.FileInfo, error) {
- return nil, ff.err
-}
-
-// Close is noop and returns no error
-func (ff failingFile) Close() error {
- return nil
-}
-
-// TestServeHTTPFailingFS tests error cases where the Open
-// function fails with various errors.
-func TestServeHTTPFailingFS(t *testing.T) {
- tests := []struct {
- fsErr error
- expectedStatus int
- expectedErr error
- expectedHeaders map[string]string
- }{
- {
- fsErr: os.ErrNotExist,
- expectedStatus: http.StatusNotFound,
- expectedErr: nil,
- },
- {
- fsErr: os.ErrPermission,
- expectedStatus: http.StatusForbidden,
- expectedErr: os.ErrPermission,
- },
- {
- fsErr: errCustom,
- expectedStatus: http.StatusServiceUnavailable,
- expectedErr: errCustom,
- expectedHeaders: map[string]string{"Retry-After": "5"},
- },
- }
-
- for i, test := range tests {
- // initialize a file server with the failing FileSystem
- fileserver := FileServer{Root: failingFS{err: test.fsErr}}
-
- // prepare the request and response
- request, err := http.NewRequest("GET", "https://foo/", nil)
- if err != nil {
- t.Fatalf("Failed to build request. Error was: %v", err)
- }
- responseRecorder := httptest.NewRecorder()
-
- status, actualErr := fileserver.ServeHTTP(responseRecorder, request)
-
- // check the status
- if status != test.expectedStatus {
- t.Errorf("Test %d: Expected status %d, found %d", i, test.expectedStatus, status)
- }
-
- // check the error
- if actualErr != test.expectedErr {
- t.Errorf("Test %d: Expected err %v, found %v", i, test.expectedErr, actualErr)
- }
-
- // check the headers - a special case for server under load
- if test.expectedHeaders != nil && len(test.expectedHeaders) > 0 {
- for expectedKey, expectedVal := range test.expectedHeaders {
- actualVal := responseRecorder.Header().Get(expectedKey)
- if expectedVal != actualVal {
- t.Errorf("Test %d: Expected header %s: %s, found %s", i, expectedKey, expectedVal, actualVal)
- }
- }
- }
- }
-}
-
-// TestServeHTTPFailingStat tests error cases where the initial Open function succeeds,
-// but the Stat method on the opened file fails.
-func TestServeHTTPFailingStat(t *testing.T) {
- tests := []struct {
- statErr error
- expectedStatus int
- expectedErr error
- }{
- {
- statErr: os.ErrNotExist,
- expectedStatus: http.StatusNotFound,
- expectedErr: nil,
- },
- {
- statErr: os.ErrPermission,
- expectedStatus: http.StatusForbidden,
- expectedErr: os.ErrPermission,
- },
- {
- statErr: errCustom,
- expectedStatus: http.StatusInternalServerError,
- expectedErr: errCustom,
- },
- }
-
- for i, test := range tests {
- // initialize a file server. The FileSystem will not fail, but calls to the Stat method of the returned File object will
- fileserver := FileServer{Root: failingFS{err: nil, fileImpl: failingFile{err: test.statErr}}}
-
- // prepare the request and response
- request, err := http.NewRequest("GET", "https://foo/", nil)
- if err != nil {
- t.Fatalf("Failed to build request. Error was: %v", err)
- }
- responseRecorder := httptest.NewRecorder()
-
- status, actualErr := fileserver.ServeHTTP(responseRecorder, request)
-
- // check the status
- if status != test.expectedStatus {
- t.Errorf("Test %d: Expected status %d, found %d", i, test.expectedStatus, status)
- }
-
- // check the error
- if actualErr != test.expectedErr {
- t.Errorf("Test %d: Expected err %v, found %v", i, test.expectedErr, actualErr)
- }
- }
-}
-
-// Paths for the fake site used temporarily during testing.
-var (
- webrootFile1HTML = filepath.Join(webrootName, "file1.html")
- webrootNotIndexHTML = filepath.Join(webrootName, "notindex.html")
- webrootDirFile2HTML = filepath.Join(webrootName, "dir", "file2.html")
- webrootDirHiddenHTML = filepath.Join(webrootName, "dir", "hidden.html")
- webrootDirwithindexIndeHTML = filepath.Join(webrootName, "dirwithindex", "index.html")
- webrootSubGzippedHTML = filepath.Join(webrootName, "sub", "gzipped.html")
- webrootSubGzippedHTMLGz = filepath.Join(webrootName, "sub", "gzipped.html.gz")
- webrootSubGzippedHTMLBr = filepath.Join(webrootName, "sub", "gzipped.html.br")
- webrootSubBrotliHTML = filepath.Join(webrootName, "sub", "brotli.html")
- webrootSubBrotliHTMLGz = filepath.Join(webrootName, "sub", "brotli.html.gz")
- webrootSubBrotliHTMLBr = filepath.Join(webrootName, "sub", "brotli.html.br")
- webrootSubBarDirWithIndexIndexHTML = filepath.Join(webrootName, "bar", "dirwithindex", "index.html")
-)
-
-// testFiles is a map with relative paths to test files as keys and file content as values.
-// The map represents the following structure:
-// - $TEMP/caddy_testdir/
-// '-- unreachable.html
-// '-- webroot/
-// '---- file1.html
-// '---- dirwithindex/
-// '------ index.html
-// '---- dir/
-// '------ file2.html
-// '------ hidden.html
-var testFiles = map[string]string{
- "unreachable.html": "
+
+
+
+
+
diff --git a/modules/caddyhttp/fileserver/browsetplcontext.go b/modules/caddyhttp/fileserver/browsetplcontext.go
new file mode 100644
index 00000000000..b9489c6a6dc
--- /dev/null
+++ b/modules/caddyhttp/fileserver/browsetplcontext.go
@@ -0,0 +1,383 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "context"
+ "io/fs"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// directoryListing builds the template context for a browse listing of
+// the given directory entries: it skips hidden files, counts files and
+// directories, accumulates total sizes (with and without following
+// symlinks), and tracks the most recent modification time, which is
+// used for the Last-Modified header.
+func (fsrv *FileServer) directoryListing(ctx context.Context, fileSystem fs.FS, parentModTime time.Time, entries []fs.DirEntry, canGoUp bool, root, urlPath string, repl *caddy.Replacer) *browseTemplateContext {
+	filesToHide := fsrv.transformHidePaths(repl)
+
+	name, _ := url.PathUnescape(urlPath)
+
+	tplCtx := &browseTemplateContext{
+		Name:         path.Base(name),
+		Path:         urlPath,
+		CanGoUp:      canGoUp,
+		lastModified: parentModTime,
+	}
+
+	for _, entry := range entries {
+		// stop work promptly if the request was canceled
+		if err := ctx.Err(); err != nil {
+			break
+		}
+
+		name := entry.Name()
+
+		if fileHidden(name, filesToHide) {
+			continue
+		}
+
+		info, err := entry.Info()
+		if err != nil {
+			if c := fsrv.logger.Check(zapcore.ErrorLevel, "could not get info about directory entry"); c != nil {
+				// include the error itself so the log entry is actionable
+				c.Write(zap.String("name", entry.Name()), zap.String("root", root), zap.Error(err))
+			}
+			continue
+		}
+
+		// keep track of the most recently modified item in the listing
+		modTime := info.ModTime()
+		if tplCtx.lastModified.IsZero() || modTime.After(tplCtx.lastModified) {
+			tplCtx.lastModified = modTime
+		}
+
+		isDir := entry.IsDir() || fsrv.isSymlinkTargetDir(fileSystem, info, root, urlPath)
+
+		// add the slash after the escape of path to avoid escaping the slash as well
+		if isDir {
+			name += "/"
+			tplCtx.NumDirs++
+		} else {
+			tplCtx.NumFiles++
+		}
+
+		size := info.Size()
+
+		if !isDir {
+			// increase the total by the symlink's size, not the target's size,
+			// by incrementing before we follow the symlink
+			tplCtx.TotalFileSize += size
+		}
+
+		fileIsSymlink := isSymlink(info)
+		symlinkPath := ""
+		if fileIsSymlink {
+			// named "target" (not "path") so the path package isn't shadowed
+			target := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, info.Name()))
+			fileInfo, err := fs.Stat(fileSystem, target)
+			if err == nil {
+				size = fileInfo.Size()
+			}
+
+			if fsrv.Browse.RevealSymlinks {
+				symLinkTarget, err := filepath.EvalSymlinks(target)
+				if err == nil {
+					symlinkPath = symLinkTarget
+				}
+			}
+
+			// An error most likely means the symlink target doesn't exist,
+			// which isn't entirely unusual and shouldn't fail the listing.
+			// In this case, just use the size of the symlink itself, which
+			// was already set above.
+		}
+
+		if !isDir {
+			// increase the total including the symlink target's size
+			tplCtx.TotalFileSizeFollowingSymlinks += size
+		}
+
+		u := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name
+
+		tplCtx.Items = append(tplCtx.Items, fileInfo{
+			IsDir:       isDir,
+			IsSymlink:   fileIsSymlink,
+			Name:        name,
+			Size:        size,
+			URL:         u.String(),
+			ModTime:     modTime.UTC(),
+			Mode:        info.Mode(),
+			Tpl:         tplCtx, // a reference up to the template context is useful
+			SymlinkPath: symlinkPath,
+		})
+	}
+
+	// this time is used for the Last-Modified header and comparing If-Modified-Since from client
+	// both are expected to be in UTC, so we convert to UTC here
+	// see: https://github.com/caddyserver/caddy/issues/6828
+	tplCtx.lastModified = tplCtx.lastModified.UTC()
+	return tplCtx
+}
+
+// browseTemplateContext provides the template context for directory listings.
+type browseTemplateContext struct {
+	// The name of the directory (the last element of the path).
+	Name string `json:"name"`
+
+	// The full path of the request.
+	Path string `json:"path"`
+
+	// Whether the parent directory is browsable.
+	CanGoUp bool `json:"can_go_up"`
+
+	// The items (files and folders) in the path.
+	Items []fileInfo `json:"items,omitempty"`
+
+	// If ≠0, Items is the remainder of the full listing after
+	// skipping this many leading elements.
+	Offset int `json:"offset,omitempty"`
+
+	// If ≠0, Items has been truncated to at most this many elements.
+	Limit int `json:"limit,omitempty"`
+
+	// The number of directories in the listing.
+	NumDirs int `json:"num_dirs"`
+
+	// The number of files (items that aren't directories) in the listing.
+	NumFiles int `json:"num_files"`
+
+	// The total size of all files in the listing. Only includes the
+	// size of the files themselves, not the size of symlink targets
+	// (i.e. the calculation of this value does not follow symlinks).
+	TotalFileSize int64 `json:"total_file_size"`
+
+	// The total size of all files in the listing, including the
+	// size of the files targeted by symlinks.
+	TotalFileSizeFollowingSymlinks int64 `json:"total_file_size_following_symlinks"`
+
+	// Sort column used
+	Sort string `json:"sort,omitempty"`
+
+	// Sorting order
+	Order string `json:"order,omitempty"`
+
+	// Display format (list or grid)
+	Layout string `json:"layout,omitempty"`
+
+	// The most recent file modification date in the listing.
+	// Used for HTTP header purposes; stored in UTC (directoryListing
+	// converts it before returning).
+	lastModified time.Time
+}
+
+// Breadcrumbs returns l.Path where every element maps
+// the link to the text to display, suitable for rendering
+// a breadcrumb menu of the current directory.
+func (l browseTemplateContext) Breadcrumbs() []crumb {
+	if len(l.Path) == 0 {
+		return []crumb{}
+	}
+
+	// skip trailing slash
+	lpath := l.Path
+	if lpath[len(lpath)-1] == '/' {
+		lpath = lpath[:len(lpath)-1]
+	}
+	parts := strings.Split(lpath, "/")
+	result := make([]crumb, len(parts))
+	for i, p := range parts {
+		if i == 0 && p == "" {
+			p = "/"
+		}
+		// the directory name could include an encoded slash in its path,
+		// so the item name should be unescaped in the loop rather than unescaping the
+		// entire path outside the loop. On a malformed escape, keep the
+		// original text rather than collapsing the crumb to "".
+		if unescaped, err := url.PathUnescape(p); err == nil {
+			p = unescaped
+		}
+		lnk := strings.Repeat("../", len(parts)-i-1)
+		result[i] = crumb{Link: lnk, Text: p}
+	}
+
+	return result
+}
+
+func (l *browseTemplateContext) applySortAndLimit(sortParam, orderParam, limitParam string, offsetParam string) {
+ l.Sort = sortParam
+ l.Order = orderParam
+
+ if l.Order == "desc" {
+ switch l.Sort {
+ case sortByName:
+ sort.Sort(sort.Reverse(byName(*l)))
+ case sortByNameDirFirst:
+ sort.Sort(sort.Reverse(byNameDirFirst(*l)))
+ case sortBySize:
+ sort.Sort(sort.Reverse(bySize(*l)))
+ case sortByTime:
+ sort.Sort(sort.Reverse(byTime(*l)))
+ }
+ } else {
+ switch l.Sort {
+ case sortByName:
+ sort.Sort(byName(*l))
+ case sortByNameDirFirst:
+ sort.Sort(byNameDirFirst(*l))
+ case sortBySize:
+ sort.Sort(bySize(*l))
+ case sortByTime:
+ sort.Sort(byTime(*l))
+ }
+ }
+
+ if offsetParam != "" {
+ offset, _ := strconv.Atoi(offsetParam)
+ if offset > 0 && offset <= len(l.Items) {
+ l.Items = l.Items[offset:]
+ l.Offset = offset
+ }
+ }
+
+ if limitParam != "" {
+ limit, _ := strconv.Atoi(limitParam)
+
+ if limit > 0 && limit <= len(l.Items) {
+ l.Items = l.Items[:limit]
+ l.Limit = limit
+ }
+ }
+}
+
+// crumb represents part of a breadcrumb menu,
+// pairing a link with the text to display.
+type crumb struct {
+	// Link is a relative href (a run of "../" produced by Breadcrumbs);
+	// Text is the display name of that path element.
+	Link, Text string
+}
+
+// fileInfo contains serializable information
+// about a file or directory.
+type fileInfo struct {
+	// Name carries a trailing slash for directories (added by directoryListing).
+	Name string `json:"name"`
+	// Size in bytes; for symlinks this is the target's size when it can
+	// be resolved, otherwise the size of the link itself.
+	Size int64 `json:"size"`
+	// URL is the relative, escaped link to the item ("./" + name).
+	URL     string      `json:"url"`
+	ModTime time.Time   `json:"mod_time"`
+	Mode    os.FileMode `json:"mode"`
+	IsDir   bool        `json:"is_dir"`
+	IsSymlink bool      `json:"is_symlink"`
+	// SymlinkPath is the resolved target, only set when reveal_symlinks is enabled.
+	SymlinkPath string `json:"symlink_path,omitempty"`
+
+	// a pointer to the template context is useful inside nested templates
+	Tpl *browseTemplateContext `json:"-"`
+}
+
+// HasExt returns true if the filename has any of the given suffixes, case-insensitive.
+func (fi fileInfo) HasExt(exts ...string) bool {
+	// lowercase the name once instead of once per extension
+	name := strings.ToLower(fi.Name)
+	return slices.ContainsFunc(exts, func(ext string) bool {
+		return strings.HasSuffix(name, strings.ToLower(ext))
+	})
+}
+
+// HumanSize returns the size of the file as a
+// human-readable string in IEC format (i.e.
+// power of 2 or base 1024). Note that for symlinks
+// Size may reflect the target, not the link (see directoryListing).
+func (fi fileInfo) HumanSize() string {
+	return humanize.IBytes(uint64(fi.Size))
+}
+
+// HumanTotalFileSize returns the total size of all files
+// in the listing as a human-readable string in IEC format
+// (i.e. power of 2 or base 1024). Symlink targets are not
+// counted; see HumanTotalFileSizeFollowingSymlinks for that.
+func (btc browseTemplateContext) HumanTotalFileSize() string {
+	return humanize.IBytes(uint64(btc.TotalFileSize))
+}
+
+// HumanTotalFileSizeFollowingSymlinks is the same as HumanTotalFileSize
+// except the returned value reflects the size of symlink targets.
+func (btc browseTemplateContext) HumanTotalFileSizeFollowingSymlinks() string {
+	return humanize.IBytes(uint64(btc.TotalFileSizeFollowingSymlinks))
+}
+
+// HumanModTime returns the modified time of the file
+// as a human-readable string given by format (a Go
+// time layout string, e.g. "2006-01-02 15:04:05").
+func (fi fileInfo) HumanModTime(format string) string {
+	return fi.ModTime.Format(format)
+}
+
+// Sortable views over a browseTemplateContext's Items,
+// one per supported sort column. They share the Items
+// slice, so sorting a view sorts the context's Items.
+type (
+	byName         browseTemplateContext
+	byNameDirFirst browseTemplateContext
+	bySize         browseTemplateContext
+	byTime         browseTemplateContext
+)
+
+// byName sorts items case-insensitively by name.
+func (l byName) Len() int      { return len(l.Items) }
+func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l byName) Less(i, j int) bool {
+	return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+}
+
+// byNameDirFirst sorts like byName but places all directories
+// ahead of all files.
+func (l byNameDirFirst) Len() int      { return len(l.Items) }
+func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l byNameDirFirst) Less(i, j int) bool {
+	// sort by name if both are dir or file
+	if l.Items[i].IsDir == l.Items[j].IsDir {
+		return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+	}
+	// sort dir ahead of file
+	return l.Items[i].IsDir
+}
+
+// bySize sorts items by size, with directories grouped first
+// (sorted among themselves by name).
+func (l bySize) Len() int      { return len(l.Items) }
+func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+
+func (l bySize) Less(i, j int) bool {
+	// sentinel smaller than any real file size, so directories sort first
+	const directoryOffset = -1 << 31 // = math.MinInt32
+
+	iSize, jSize := l.Items[i].Size, l.Items[j].Size
+
+	// directory sizes depend on the file system; to
+	// provide a consistent experience, put them up front
+	// and sort them by name
+	if l.Items[i].IsDir {
+		iSize = directoryOffset
+	}
+	if l.Items[j].IsDir {
+		jSize = directoryOffset
+	}
+	// two directories tie on the sentinel; fall back to name order
+	if l.Items[i].IsDir && l.Items[j].IsDir {
+		return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name)
+	}
+
+	return iSize < jSize
+}
+
+// byTime sorts items by modification time, oldest first.
+func (l byTime) Len() int           { return len(l.Items) }
+func (l byTime) Swap(i, j int)      { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
+func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) }
+
+// Sort columns and orders accepted by the browse "sort" subdirective
+// and by applySortAndLimit's parameters.
+const (
+	sortByName         = "name"
+	sortByNameDirFirst = "namedirfirst"
+	sortBySize         = "size"
+	sortByTime         = "time"
+
+	sortOrderAsc  = "asc"
+	sortOrderDesc = "desc"
+)
diff --git a/modules/caddyhttp/fileserver/browsetplcontext_test.go b/modules/caddyhttp/fileserver/browsetplcontext_test.go
new file mode 100644
index 00000000000..184196fa8c4
--- /dev/null
+++ b/modules/caddyhttp/fileserver/browsetplcontext_test.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "testing"
+)
+
+// TestBreadcrumbs exercises browseTemplateContext.Breadcrumbs with
+// empty, absolute, relative, percent-escaped, and non-ASCII paths.
+func TestBreadcrumbs(t *testing.T) {
+	testdata := []struct {
+		path     string
+		expected []crumb
+	}{
+		{"", []crumb{}},
+		{"/", []crumb{{Text: "/"}}},
+		{"/foo/", []crumb{
+			{Link: "../", Text: "/"},
+			{Link: "", Text: "foo"},
+		}},
+		{"/foo/bar/", []crumb{
+			{Link: "../../", Text: "/"},
+			{Link: "../", Text: "foo"},
+			{Link: "", Text: "bar"},
+		}},
+		{"/foo bar/", []crumb{
+			{Link: "../", Text: "/"},
+			{Link: "", Text: "foo bar"},
+		}},
+		{"/foo bar/baz/", []crumb{
+			{Link: "../../", Text: "/"},
+			{Link: "../", Text: "foo bar"},
+			{Link: "", Text: "baz"},
+		}},
+		{"/100%25 test coverage/is a lie/", []crumb{
+			{Link: "../../", Text: "/"},
+			{Link: "../", Text: "100% test coverage"},
+			{Link: "", Text: "is a lie"},
+		}},
+		{"/AC%2FDC/", []crumb{
+			{Link: "../", Text: "/"},
+			{Link: "", Text: "AC/DC"},
+		}},
+		{"/foo/%2e%2e%2f/bar", []crumb{
+			{Link: "../../../", Text: "/"},
+			{Link: "../../", Text: "foo"},
+			{Link: "../", Text: "../"},
+			{Link: "", Text: "bar"},
+		}},
+		{"/foo/../bar", []crumb{
+			{Link: "../../../", Text: "/"},
+			{Link: "../../", Text: "foo"},
+			{Link: "../", Text: ".."},
+			{Link: "", Text: "bar"},
+		}},
+		{"foo/bar/baz", []crumb{
+			{Link: "../../", Text: "foo"},
+			{Link: "../", Text: "bar"},
+			{Link: "", Text: "baz"},
+		}},
+		{"/qux/quux/corge/", []crumb{
+			{Link: "../../../", Text: "/"},
+			{Link: "../../", Text: "qux"},
+			{Link: "../", Text: "quux"},
+			{Link: "", Text: "corge"},
+		}},
+		{"/مجلد/", []crumb{
+			{Link: "../", Text: "/"},
+			{Link: "", Text: "مجلد"},
+		}},
+		{"/مجلد-1/مجلد-2", []crumb{
+			{Link: "../../", Text: "/"},
+			{Link: "../", Text: "مجلد-1"},
+			{Link: "", Text: "مجلد-2"},
+		}},
+		{"/مجلد%2F1", []crumb{
+			{Link: "../", Text: "/"},
+			{Link: "", Text: "مجلد/1"},
+		}},
+	}
+
+	for testNum, d := range testdata {
+		l := browseTemplateContext{Path: d.path}
+		actual := l.Breadcrumbs()
+		if len(actual) != len(d.expected) {
+			t.Errorf("Test %d: Got %d components but expected %d; got: %+v", testNum, len(actual), len(d.expected), actual)
+			continue
+		}
+		for i, c := range actual {
+			if c != d.expected[i] {
+				// the crumb index is already in the message prefix; don't repeat it
+				t.Errorf("Test %d crumb %d: got %#v but expected %#v", testNum, i, c, d.expected[i])
+			}
+		}
+	}
+}
diff --git a/modules/caddyhttp/fileserver/caddyfile.go b/modules/caddyhttp/fileserver/caddyfile.go
new file mode 100644
index 00000000000..80a37322bca
--- /dev/null
+++ b/modules/caddyhttp/fileserver/caddyfile.go
@@ -0,0 +1,329 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
+)
+
+func init() {
+	// register the file_server handler directive and the try_files
+	// shorthand with the Caddyfile adapter
+	httpcaddyfile.RegisterHandlerDirective("file_server", parseCaddyfile)
+	httpcaddyfile.RegisterDirective("try_files", parseTryFiles)
+}
+
+// parseCaddyfile parses the file_server directive.
+// See UnmarshalCaddyfile for the syntax.
+func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+ fsrv := new(FileServer)
+ err := fsrv.UnmarshalCaddyfile(h.Dispenser)
+ if err != nil {
+ return fsrv, err
+ }
+ err = fsrv.FinalizeUnmarshalCaddyfile(h)
+ if err != nil {
+ return nil, err
+ }
+ return fsrv, err
+}
+
+// UnmarshalCaddyfile parses the file_server directive. It enables
+// the static file server and configures it with this syntax:
+//
+//	file_server [<matcher>] [browse] {
+//		fs <filesystem>
+//		root <path>
+//		hide <files...>
+//		index <filenames...>
+//		browse [<template_file>]
+//		precompressed <formats...>
+//		status <status_code>
+//		disable_canonical_uris
+//	}
+//
+// The FinalizeUnmarshalCaddyfile method should be called after this
+// to finalize setup of hidden Caddyfiles.
+func (fsrv *FileServer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	d.Next() // consume directive name
+
+	args := d.RemainingArgs()
+	switch len(args) {
+	case 0:
+	case 1:
+		// "browse" is the only valid inline argument
+		if args[0] != "browse" {
+			return d.ArgErr()
+		}
+		fsrv.Browse = new(Browse)
+	default:
+		return d.ArgErr()
+	}
+
+	for nesting := d.Nesting(); d.NextBlock(nesting); {
+		switch d.Val() {
+		case "fs":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			if fsrv.FileSystem != "" {
+				return d.Err("file system already specified")
+			}
+			fsrv.FileSystem = d.Val()
+
+		case "hide":
+			fsrv.Hide = d.RemainingArgs()
+			if len(fsrv.Hide) == 0 {
+				return d.ArgErr()
+			}
+
+		case "index":
+			fsrv.IndexNames = d.RemainingArgs()
+			if len(fsrv.IndexNames) == 0 {
+				return d.ArgErr()
+			}
+
+		case "root":
+			if !d.Args(&fsrv.Root) {
+				return d.ArgErr()
+			}
+
+		case "browse":
+			if fsrv.Browse != nil {
+				return d.Err("browsing is already configured")
+			}
+			fsrv.Browse = new(Browse)
+			d.Args(&fsrv.Browse.TemplateFile)
+			for nesting := d.Nesting(); d.NextBlock(nesting); {
+				switch d.Val() {
+				case "reveal_symlinks":
+					if fsrv.Browse.RevealSymlinks {
+						return d.Err("Symlinks path reveal is already enabled")
+					}
+					fsrv.Browse.RevealSymlinks = true
+				case "sort":
+					for d.NextArg() {
+						dVal := d.Val()
+						switch dVal {
+						case sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOrderAsc, sortOrderDesc:
+							fsrv.Browse.SortOptions = append(fsrv.Browse.SortOptions, dVal)
+						default:
+							return d.Errf("unknown sort option '%s'", dVal)
+						}
+					}
+				case "file_limit":
+					fileLimit := d.RemainingArgs()
+					if len(fileLimit) != 1 {
+						return d.Err("file_limit should have an integer value")
+					}
+					// reject a non-integer value instead of silently treating it as 0
+					val, err := strconv.Atoi(fileLimit[0])
+					if err != nil {
+						return d.Errf("file_limit should have an integer value: %v", err)
+					}
+					if fsrv.Browse.FileLimit != 0 {
+						return d.Err("file_limit is already enabled")
+					}
+					fsrv.Browse.FileLimit = val
+				default:
+					return d.Errf("unknown subdirective '%s'", d.Val())
+				}
+			}
+
+		case "precompressed":
+			fsrv.PrecompressedOrder = d.RemainingArgs()
+			if len(fsrv.PrecompressedOrder) == 0 {
+				// default preference order when no formats are given
+				fsrv.PrecompressedOrder = []string{"br", "zstd", "gzip"}
+			}
+
+			for _, format := range fsrv.PrecompressedOrder {
+				modID := "http.precompressed." + format
+				mod, err := caddy.GetModule(modID)
+				if err != nil {
+					return d.Errf("getting module named '%s': %v", modID, err)
+				}
+				inst := mod.New()
+				precompress, ok := inst.(encode.Precompressed)
+				if !ok {
+					return d.Errf("module %s is not a precompressor; is %T", modID, inst)
+				}
+				if fsrv.PrecompressedRaw == nil {
+					fsrv.PrecompressedRaw = make(caddy.ModuleMap)
+				}
+				fsrv.PrecompressedRaw[format] = caddyconfig.JSON(precompress, nil)
+			}
+
+		case "status":
+			if !d.NextArg() {
+				return d.ArgErr()
+			}
+			fsrv.StatusCode = caddyhttp.WeakString(d.Val())
+
+		case "disable_canonical_uris":
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			falseBool := false
+			fsrv.CanonicalURIs = &falseBool
+
+		case "pass_thru":
+			if d.NextArg() {
+				return d.ArgErr()
+			}
+			fsrv.PassThru = true
+
+		case "etag_file_extensions":
+			etagFileExtensions := d.RemainingArgs()
+			if len(etagFileExtensions) == 0 {
+				return d.ArgErr()
+			}
+			fsrv.EtagFileExtensions = etagFileExtensions
+
+		default:
+			return d.Errf("unknown subdirective '%s'", d.Val())
+		}
+	}
+
+	return nil
+}
+
+// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
+// requires having an httpcaddyfile.Helper to function, to setup hidden Caddyfiles.
+func (fsrv *FileServer) FinalizeUnmarshalCaddyfile(h httpcaddyfile.Helper) error {
+	// Hide the Caddyfile (and any imported Caddyfiles).
+	// This needs to be done in here instead of UnmarshalCaddyfile
+	// because UnmarshalCaddyfile only has access to the dispenser
+	// and not the helper, and only the helper has access to the
+	// Caddyfiles function.
+	for _, cfgFile := range h.Caddyfiles() {
+		cfgFile = filepath.Clean(cfgFile)
+		if fileHidden(cfgFile, fsrv.Hide) {
+			continue // already hidden
+		}
+		// if there's no path separator, the file server module will hide all
+		// files by that name, rather than a specific one; but we want to hide
+		// only this specific file, so ensure there's always a path separator
+		if !strings.Contains(cfgFile, separator) {
+			cfgFile = "." + separator + cfgFile
+		}
+		fsrv.Hide = append(fsrv.Hide, cfgFile)
+	}
+	return nil
+}
+
+// parseTryFiles parses the try_files directive. It combines a file matcher
+// with a rewrite directive, so this is not a standard handler directive.
+// A try_files directive has this syntax (notice no matcher tokens accepted):
+//
+//	try_files <files...> {
+//		policy first_exist|smallest_size|largest_size|most_recently_modified
+//	}
+//
+// and is basically shorthand for:
+//
+//	@try_files file {
+//		try_files <files...>
+//		policy first_exist|smallest_size|largest_size|most_recently_modified
+//	}
+//	rewrite @try_files {http.matchers.file.relative}
+//
+// This directive rewrites request paths only, preserving any other part
+// of the URI, unless the part is explicitly given in the file list. For
+// example, if any of the files in the list have a query string:
+//
+//	try_files {path} index.php?{query}&p={path}
+//
+// then the query string will not be treated as part of the file name; and
+// if that file matches, the given query string will replace any query string
+// that already exists on the request URI.
+func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
+	if !h.Next() {
+		return nil, h.ArgErr()
+	}
+
+	// at least one file to try is required
+	tryFiles := h.RemainingArgs()
+	if len(tryFiles) == 0 {
+		return nil, h.ArgErr()
+	}
+
+	// parse out the optional try policy
+	var tryPolicy string
+	for h.NextBlock(0) {
+		switch h.Val() {
+		case "policy":
+			if tryPolicy != "" {
+				return nil, h.Err("try policy already configured")
+			}
+			if !h.NextArg() {
+				return nil, h.ArgErr()
+			}
+			tryPolicy = h.Val()
+
+			switch tryPolicy {
+			case tryPolicyFirstExist, tryPolicyFirstExistFallback, tryPolicyLargestSize, tryPolicySmallestSize, tryPolicyMostRecentlyMod:
+			default:
+				return nil, h.Errf("unrecognized try policy: %s", tryPolicy)
+			}
+		}
+	}
+
+	// makeRoute returns a route that tries the files listed in try
+	// and then rewrites to the matched file; userQueryString is
+	// appended to the rewrite rule.
+	makeRoute := func(try []string, userQueryString string) []httpcaddyfile.ConfigValue {
+		handler := rewrite.Rewrite{
+			URI: "{http.matchers.file.relative}" + userQueryString,
+		}
+		matcherSet := caddy.ModuleMap{
+			"file": h.JSON(MatchFile{TryFiles: try, TryPolicy: tryPolicy}),
+		}
+		return h.NewRoute(matcherSet, handler)
+	}
+
+	var result []httpcaddyfile.ConfigValue
+
+	// if there are query strings in the list, we have to split into
+	// a separate route for each item with a query string, because
+	// the rewrite is different for that item
+	try := make([]string, 0, len(tryFiles))
+	for _, item := range tryFiles {
+		if idx := strings.Index(item, "?"); idx >= 0 {
+			// flush any accumulated plain files as their own route first,
+			// so matching order is preserved
+			if len(try) > 0 {
+				result = append(result, makeRoute(try, "")...)
+				try = []string{}
+			}
+			// route for this single item: path before '?', query after
+			result = append(result, makeRoute([]string{item[:idx]}, item[idx:])...)
+			continue
+		}
+		// accumulate consecutive non-query-string parameters
+		try = append(try, item)
+	}
+	if len(try) > 0 {
+		result = append(result, makeRoute(try, "")...)
+	}
+
+	// ensure that multiple routes (possible if rewrite targets
+	// have query strings, for example) are grouped together
+	// so only the first matching rewrite is performed (#2891)
+	h.GroupRoutes(result)
+
+	return result, nil
+}
+
+var _ caddyfile.Unmarshaler = (*FileServer)(nil)
diff --git a/modules/caddyhttp/fileserver/command.go b/modules/caddyhttp/fileserver/command.go
new file mode 100644
index 00000000000..a04d7cade07
--- /dev/null
+++ b/modules/caddyhttp/fileserver/command.go
@@ -0,0 +1,224 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+
+ caddycmd "github.com/caddyserver/caddy/v2/cmd"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+ caddytpl "github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
+)
+
+// init registers the "file-server" subcommand (and its nested
+// "export-template" subcommand) with Caddy's command-line interface.
+func init() {
+	caddycmd.RegisterCommand(caddycmd.Command{
+		Name:  "file-server",
+		Usage: "[--domain ] [--root ] [--listen ] [--browse] [--reveal-symlinks] [--access-log] [--precompressed]",
+		Short: "Spins up a production-ready file server",
+		Long: `
+A simple but production-ready file server. Useful for quick deployments,
+demos, and development.
+
+The listener's socket address can be customized with the --listen flag.
+
+If a domain name is specified with --domain, the default listener address
+will be changed to the HTTPS port and the server will use HTTPS. If using
+a public domain, ensure A/AAAA records are properly configured before
+using this option.
+
+By default, Zstandard and Gzip compression are enabled. Use --no-compress
+to disable compression.
+
+If --browse is enabled, requests for folders without an index file will
+respond with a file listing.`,
+		CobraFunc: func(cmd *cobra.Command) {
+			cmd.Flags().StringP("domain", "d", "", "Domain name at which to serve the files")
+			cmd.Flags().StringP("root", "r", "", "The path to the root of the site")
+			cmd.Flags().StringP("listen", "l", "", "The address to which to bind the listener")
+			cmd.Flags().BoolP("browse", "b", false, "Enable directory browsing")
+			cmd.Flags().BoolP("reveal-symlinks", "", false, "Show symlink paths when browse is enabled.")
+			cmd.Flags().BoolP("templates", "t", false, "Enable template rendering")
+			cmd.Flags().BoolP("access-log", "a", false, "Enable the access log")
+			cmd.Flags().BoolP("debug", "v", false, "Enable verbose debug logs")
+			cmd.Flags().IntP("file-limit", "f", defaultDirEntryLimit, "Max directories to read")
+			cmd.Flags().BoolP("no-compress", "", false, "Disable Zstandard and Gzip compression")
+			cmd.Flags().StringSliceP("precompressed", "p", []string{}, "Specify precompression file extensions. Compression preference implied from flag order.")
+			cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdFileServer)
+			// "export-template" writes the built-in browse template to
+			// stdout so users can customize it and pass it back in.
+			cmd.AddCommand(&cobra.Command{
+				Use:     "export-template",
+				Short:   "Exports the default file browser template",
+				Example: "caddy file-server export-template > browse.html",
+				RunE: func(cmd *cobra.Command, args []string) error {
+					_, err := io.WriteString(os.Stdout, BrowseTemplate)
+					return err
+				},
+			})
+		},
+	})
+}
+
+// cmdFileServer builds an in-memory Caddy configuration for a static
+// file server from the command-line flags, runs it, and blocks forever.
+// It returns a process exit code and an error if the configuration
+// could not be assembled or the server could not start.
+func cmdFileServer(fs caddycmd.Flags) (int, error) {
+	caddy.TrapSignals()
+
+	domain := fs.String("domain")
+	root := fs.String("root")
+	listen := fs.String("listen")
+	browse := fs.Bool("browse")
+	templates := fs.Bool("templates")
+	accessLog := fs.Bool("access-log")
+	fileLimit := fs.Int("file-limit")
+	debug := fs.Bool("debug")
+	revealSymlinks := fs.Bool("reveal-symlinks")
+	compress := !fs.Bool("no-compress")
+	precompressed, err := fs.GetStringSlice("precompressed")
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid precompressed flag: %v", err)
+	}
+	var handlers []json.RawMessage
+
+	// on-the-fly compression runs before the file server handler
+	if compress {
+		zstd, err := caddy.GetModule("http.encoders.zstd")
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+
+		gzip, err := caddy.GetModule("http.encoders.gzip")
+		if err != nil {
+			return caddy.ExitCodeFailedStartup, err
+		}
+
+		handlers = append(handlers, caddyconfig.JSONModuleObject(encode.Encode{
+			EncodingsRaw: caddy.ModuleMap{
+				"zstd": caddyconfig.JSON(zstd.New(), nil),
+				"gzip": caddyconfig.JSON(gzip.New(), nil),
+			},
+			Prefer: []string{"zstd", "gzip"},
+		}, "handler", "encode", nil))
+	}
+
+	if templates {
+		handler := caddytpl.Templates{FileRoot: root}
+		handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "templates", nil))
+	}
+
+	handler := FileServer{Root: root}
+
+	if len(precompressed) != 0 {
+		// logic mirrors modules/caddyhttp/fileserver/caddyfile.go case "precompressed"
+		var order []string
+		for _, compression := range precompressed {
+			modID := "http.precompressed." + compression
+			mod, err := caddy.GetModule(modID)
+			if err != nil {
+				return caddy.ExitCodeFailedStartup, fmt.Errorf("getting module named '%s': %v", modID, err)
+			}
+			inst := mod.New()
+			precompress, ok := inst.(encode.Precompressed)
+			if !ok {
+				return caddy.ExitCodeFailedStartup, fmt.Errorf("module %s is not a precompressor; is %T", modID, inst)
+			}
+			if handler.PrecompressedRaw == nil {
+				handler.PrecompressedRaw = make(caddy.ModuleMap)
+			}
+			handler.PrecompressedRaw[compression] = caddyconfig.JSON(precompress, nil)
+			order = append(order, compression)
+		}
+		handler.PrecompressedOrder = order
+	}
+
+	if browse {
+		handler.Browse = &Browse{RevealSymlinks: revealSymlinks, FileLimit: fileLimit}
+	}
+
+	handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "file_server", nil))
+
+	route := caddyhttp.Route{HandlersRaw: handlers}
+
+	// restrict the route to the given host if a domain was specified
+	if domain != "" {
+		route.MatcherSetsRaw = []caddy.ModuleMap{
+			{
+				"host": caddyconfig.JSON(caddyhttp.MatchHost{domain}, nil),
+			},
+		}
+	}
+
+	server := &caddyhttp.Server{
+		ReadHeaderTimeout: caddy.Duration(10 * time.Second),
+		IdleTimeout:       caddy.Duration(30 * time.Second),
+		MaxHeaderBytes:    1024 * 10,
+		Routes:            caddyhttp.RouteList{route},
+	}
+	// default listener: plain HTTP, unless a domain implies HTTPS
+	if listen == "" {
+		if domain == "" {
+			listen = ":80"
+		} else {
+			listen = ":" + strconv.Itoa(certmagic.HTTPSPort)
+		}
+	}
+	server.Listen = []string{listen}
+	if accessLog {
+		server.Logs = &caddyhttp.ServerLogConfig{}
+	}
+
+	httpApp := caddyhttp.App{
+		Servers: map[string]*caddyhttp.Server{"static": server},
+	}
+
+	// this config is ephemeral: disable the admin API and never
+	// persist it to disk (a named local avoids shadowing the
+	// predeclared identifier `false`, which the original did)
+	noPersist := false
+	cfg := &caddy.Config{
+		Admin: &caddy.AdminConfig{
+			Disabled: true,
+			Config: &caddy.ConfigSettings{
+				Persist: &noPersist,
+			},
+		},
+		AppsRaw: caddy.ModuleMap{
+			"http": caddyconfig.JSON(httpApp, nil),
+		},
+	}
+
+	if debug {
+		cfg.Logging = &caddy.Logging{
+			Logs: map[string]*caddy.CustomLog{
+				"default": {
+					BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
+				},
+			},
+		}
+	}
+
+	err = caddy.Run(cfg)
+	if err != nil {
+		return caddy.ExitCodeFailedStartup, err
+	}
+
+	log.Printf("Caddy serving static files on %s", listen)
+
+	// block forever; shutdown is handled by TrapSignals above
+	select {}
+}
diff --git a/modules/caddyhttp/fileserver/matcher.go b/modules/caddyhttp/fileserver/matcher.go
new file mode 100644
index 00000000000..2bc665d4f92
--- /dev/null
+++ b/modules/caddyhttp/fileserver/matcher.go
@@ -0,0 +1,737 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "fmt"
+ "io/fs"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/parser"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// init registers the file matcher module with Caddy.
+func init() {
+	caddy.RegisterModule(MatchFile{})
+}
+
+// MatchFile is an HTTP request matcher that can match
+// requests based upon file existence.
+//
+// Upon matching, four new placeholders will be made
+// available:
+//
+//   - `{http.matchers.file.relative}` The root-relative
+//     path of the file. This is often useful when rewriting
+//     requests.
+//   - `{http.matchers.file.absolute}` The absolute path
+//     of the matched file.
+//   - `{http.matchers.file.type}` Set to "directory" if
+//     the matched file is a directory, "file" otherwise.
+//   - `{http.matchers.file.remainder}` Set to the remainder
+//     of the path if the path was split by `split_path`.
+//
+// Even though file matching may depend on the OS path
+// separator, the placeholder values always use /.
+type MatchFile struct {
+	// The file system implementation to use. By default, the
+	// local disk file system will be used.
+	FileSystem string `json:"fs,omitempty"`
+
+	// The root directory, used for creating absolute
+	// file paths, and required when working with
+	// relative paths; if not specified, `{http.vars.root}`
+	// will be used, if set; otherwise, the current
+	// directory is assumed. Accepts placeholders.
+	Root string `json:"root,omitempty"`
+
+	// The list of files to try. Each path here is
+	// considered related to Root. If nil, the request
+	// URL's path will be assumed. Files and
+	// directories are treated distinctly, so to match
+	// a directory, the filepath MUST end in a forward
+	// slash `/`. To match a regular file, there must
+	// be no trailing slash. Accepts placeholders. If
+	// the policy is "first_exist", then an error may
+	// be triggered as a fallback by configuring "="
+	// followed by a status code number,
+	// for example "=404".
+	TryFiles []string `json:"try_files,omitempty"`
+
+	// How to choose a file in TryFiles. Can be:
+	//
+	//   - first_exist
+	//   - first_exist_fallback
+	//   - smallest_size
+	//   - largest_size
+	//   - most_recently_modified
+	//
+	// Default is first_exist.
+	TryPolicy string `json:"try_policy,omitempty"`
+
+	// A list of delimiters to use to split the path in two
+	// when trying files. If empty, no splitting will
+	// occur, and the path will be tried as-is. For each
+	// split value, the left-hand side of the split,
+	// including the split value, will be the path tried.
+	// For example, the path `/remote.php/dav/` using the
+	// split value `.php` would try the file `/remote.php`.
+	// Each delimiter must appear at the end of a URI path
+	// component in order to be used as a split delimiter.
+	SplitPath []string `json:"split_path,omitempty"`
+
+	// registry of named file systems, resolved in Provision
+	fsmap caddy.FileSystems
+
+	logger *zap.Logger
+}
+
+// CaddyModule returns the Caddy module information.
+// The ID places this matcher in the http.matchers namespace.
+func (MatchFile) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "http.matchers.file",
+		New: func() caddy.Module { return new(MatchFile) },
+	}
+}
+
+// UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax:
+//
+//	file {
+//	    root       <path>
+//	    try_files  <files...>
+//	    try_policy first_exist|first_exist_fallback|smallest_size|largest_size|most_recently_modified
+//	    split_path <delims...>
+//	}
+func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// iterate to merge multiple matchers into one
+	for d.Next() {
+		// inline arguments are shorthand for try_files
+		m.TryFiles = append(m.TryFiles, d.RemainingArgs()...)
+		for d.NextBlock(0) {
+			switch d.Val() {
+			case "root":
+				if !d.NextArg() {
+					return d.ArgErr()
+				}
+				m.Root = d.Val()
+			case "try_files":
+				m.TryFiles = append(m.TryFiles, d.RemainingArgs()...)
+				// NOTE(review): this checks the merged list, so a bare
+				// `try_files` with no args is not rejected when inline
+				// args were already given above — confirm intended
+				if len(m.TryFiles) == 0 {
+					return d.ArgErr()
+				}
+			case "try_policy":
+				if !d.NextArg() {
+					return d.ArgErr()
+				}
+				m.TryPolicy = d.Val()
+			case "split_path":
+				m.SplitPath = d.RemainingArgs()
+				if len(m.SplitPath) == 0 {
+					return d.ArgErr()
+				}
+			default:
+				return d.Errf("unrecognized subdirective: %s", d.Val())
+			}
+		}
+	}
+	return nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression file()
+//	expression file({http.request.uri.path}, '/index.php')
+//	expression file({'root': '/srv', 'try_files': [{http.request.uri.path}, '/index.php'], 'try_policy': 'first_exist', 'split_path': ['.php']})
+func (MatchFile) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+	requestType := cel.ObjectType("http.Request")
+
+	// matcherFactory builds a provisioned MatchFile from the CEL map
+	// argument (keys mirror the JSON config fields of this matcher)
+	matcherFactory := func(data ref.Val) (caddyhttp.RequestMatcherWithError, error) {
+		values, err := caddyhttp.CELValueToMapStrList(data)
+		if err != nil {
+			return nil, err
+		}
+
+		var root string
+		if len(values["root"]) > 0 {
+			root = values["root"][0]
+		}
+
+		var fsName string
+		if len(values["fs"]) > 0 {
+			fsName = values["fs"][0]
+		}
+
+		var try_policy string
+		if len(values["try_policy"]) > 0 {
+			try_policy = values["try_policy"][0]
+		}
+
+		m := MatchFile{
+			Root:       root,
+			TryFiles:   values["try_files"],
+			TryPolicy:  try_policy,
+			SplitPath:  values["split_path"],
+			FileSystem: fsName,
+		}
+
+		err = m.Provision(ctx)
+		return m, err
+	}
+
+	// register the file() macro and the underlying binary function it
+	// expands to; the macro rewrites user-friendly forms into
+	// file_request_map(request, {...}) calls
+	envOptions := []cel.EnvOption{
+		cel.Macros(parser.NewGlobalVarArgMacro("file", celFileMatcherMacroExpander())),
+		cel.Function("file", cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType)),
+		cel.Function("file_request_map",
+			cel.Overload("file_request_map", []*cel.Type{requestType, caddyhttp.CELTypeJSON}, cel.BoolType),
+			cel.SingletonBinaryBinding(caddyhttp.CELMatcherRuntimeFunction("file_request_map", matcherFactory))),
+	}
+
+	programOptions := []cel.ProgramOption{
+		cel.CustomDecorator(caddyhttp.CELMatcherDecorator("file_request_map", matcherFactory)),
+	}
+
+	return caddyhttp.NewMatcherCELLibrary(envOptions, programOptions), nil
+}
+
+// celFileMatcherMacroExpander returns a macro expander that rewrites
+// the various file(...) call forms into a canonical
+// file(request, {map}) call. Supported forms: file() with no args,
+// file(<string-or-placeholder>), file({map literal}), and
+// file(<string>, <string>, ...).
+func celFileMatcherMacroExpander() parser.MacroExpander {
+	return func(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+		// file() — empty config map
+		if len(args) == 0 {
+			return eh.NewCall("file",
+				eh.NewIdent(caddyhttp.CELRequestVarName),
+				eh.NewMap(),
+			), nil
+		}
+		// file(x) — a single string/placeholder becomes try_files,
+		// and a map literal is passed through as the config
+		if len(args) == 1 {
+			arg := args[0]
+			if isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg) {
+				return eh.NewCall("file",
+					eh.NewIdent(caddyhttp.CELRequestVarName),
+					eh.NewMap(eh.NewMapEntry(
+						eh.NewLiteral(types.String("try_files")),
+						eh.NewList(arg),
+						false,
+					)),
+				), nil
+			}
+			if isCELTryFilesLiteral(arg) {
+				return eh.NewCall("file", eh.NewIdent(caddyhttp.CELRequestVarName), arg), nil
+			}
+			return nil, &common.Error{
+				Location: eh.OffsetLocation(arg.ID()),
+				Message:  "matcher requires either a map or string literal argument",
+			}
+		}
+
+		// file(a, b, ...) — every argument must be a string or
+		// placeholder; the whole list becomes try_files
+		for _, arg := range args {
+			if !(isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg)) {
+				return nil, &common.Error{
+					Location: eh.OffsetLocation(arg.ID()),
+					Message:  "matcher only supports repeated string literal arguments",
+				}
+			}
+		}
+		return eh.NewCall("file",
+			eh.NewIdent(caddyhttp.CELRequestVarName),
+			eh.NewMap(eh.NewMapEntry(
+				eh.NewLiteral(types.String("try_files")),
+				eh.NewList(args...),
+				false,
+			)),
+		), nil
+	}
+}
+
+// Provision sets up m's defaults and acquires the logger and the
+// file-system registry from the provisioning context.
+func (m *MatchFile) Provision(ctx caddy.Context) error {
+	m.logger, m.fsmap = ctx.Logger(), ctx.Filesystems()
+
+	// fall back to the conventional placeholder values for any
+	// settings the user left empty
+	for _, def := range []struct {
+		field    *string
+		fallback string
+	}{
+		{&m.Root, "{http.vars.root}"},
+		{&m.FileSystem, "{http.vars.fs}"},
+	} {
+		if *def.field == "" {
+			*def.field = def.fallback
+		}
+	}
+
+	// if the list of files to try was omitted entirely, assume the
+	// request URL path (use placeholder instead of r.URL.Path;
+	// see issue #4146)
+	if m.TryFiles == nil {
+		m.TryFiles = []string{"{http.request.uri.path}"}
+	}
+	return nil
+}
+
+// Validate ensures m has a valid configuration. The empty string is
+// accepted as a try policy and means "use the default (first_exist)".
+func (m MatchFile) Validate() error {
+	for _, policy := range []string{
+		"",
+		tryPolicyFirstExist,
+		tryPolicyFirstExistFallback,
+		tryPolicyLargestSize,
+		tryPolicySmallestSize,
+		tryPolicyMostRecentlyMod,
+	} {
+		if m.TryPolicy == policy {
+			return nil
+		}
+	}
+	return fmt.Errorf("unknown try policy %s", m.TryPolicy)
+}
+
+// Match returns true if r matches m. Returns true
+// if a file was matched. If so, four placeholders
+// will be available:
+//   - http.matchers.file.relative: Path to file relative to site root
+//   - http.matchers.file.absolute: Path to file including site root
+//   - http.matchers.file.type: file or directory
+//   - http.matchers.file.remainder: Portion remaining after splitting file path (if configured)
+//
+// This legacy method cannot return an error directly, so any error
+// from selectFile is stashed in a request variable for the server
+// to surface; prefer MatchWithError.
+func (m MatchFile) Match(r *http.Request) bool {
+	match, err := m.selectFile(r)
+	if err != nil {
+		// nolint:staticcheck
+		caddyhttp.SetVar(r.Context(), caddyhttp.MatcherErrorVarKey, err)
+	}
+	return match
+}
+
+// MatchWithError returns true if r matches m, along with any error
+// produced while selecting a file (e.g. a configured "=404" fallback).
+func (m MatchFile) MatchWithError(r *http.Request) (bool, error) {
+	return m.selectFile(r)
+}
+
+// selectFile chooses a file according to m.TryPolicy by appending
+// the paths in m.TryFiles to m.Root, with placeholder replacements.
+func (m MatchFile) selectFile(r *http.Request) (bool, error) {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	root := filepath.Clean(repl.ReplaceAll(m.Root, "."))
+
+	fsName := repl.ReplaceAll(m.FileSystem, "")
+
+	// an unknown file system cannot match anything
+	fileSystem, ok := m.fsmap.Get(fsName)
+	if !ok {
+		if c := m.logger.Check(zapcore.ErrorLevel, "use of unregistered filesystem"); c != nil {
+			c.Write(zap.String("fs", fsName))
+		}
+		return false, nil
+	}
+	type matchCandidate struct {
+		fullpath, relative, splitRemainder string
+	}
+
+	// makeCandidates evaluates placeholders in file and expands any glob expressions
+	// to build a list of file candidates. Special glob characters are escaped in
+	// placeholder replacements so globs cannot be expanded from placeholders, and
+	// globs are not evaluated on Windows because of its path separator character:
+	// escaping is not supported so we can't safely glob on Windows, or we can't
+	// support placeholders on Windows (pick one). (Actually, evaluating untrusted
+	// globs is not the end of the world since the file server will still hide any
+	// hidden files, it just might lead to unexpected behavior.)
+	makeCandidates := func(file string) []matchCandidate {
+		// first, evaluate placeholders in the file pattern
+		expandedFile, err := repl.ReplaceFunc(file, func(variable string, val any) (any, error) {
+			if runtime.GOOS == "windows" {
+				return val, nil
+			}
+			switch v := val.(type) {
+			case string:
+				return globSafeRepl.Replace(v), nil
+			case fmt.Stringer:
+				return globSafeRepl.Replace(v.String()), nil
+			}
+			return val, nil
+		})
+		if err != nil {
+			if c := m.logger.Check(zapcore.ErrorLevel, "evaluating placeholders"); c != nil {
+				c.Write(zap.Error(err))
+			}
+
+			expandedFile = file // "oh well," I guess?
+		}
+
+		// clean the path and split, if configured -- we must split before
+		// globbing so that the file system doesn't include the remainder
+		// ("afterSplit") in the filename; be sure to restore trailing slash
+		beforeSplit, afterSplit := m.firstSplit(path.Clean(expandedFile))
+		if strings.HasSuffix(file, "/") {
+			beforeSplit += "/"
+		}
+
+		// create the full path to the file by prepending the site root
+		fullPattern := caddyhttp.SanitizedPathJoin(root, beforeSplit)
+
+		// expand glob expressions, but not on Windows because Glob() doesn't
+		// support escaping on Windows due to path separator)
+		var globResults []string
+		if runtime.GOOS == "windows" {
+			globResults = []string{fullPattern} // precious Windows
+		} else {
+			globResults, err = fs.Glob(fileSystem, fullPattern)
+			if err != nil {
+				if c := m.logger.Check(zapcore.ErrorLevel, "expanding glob"); c != nil {
+					c.Write(zap.Error(err))
+				}
+			}
+		}
+
+		// for each glob result, combine all the forms of the path
+		var candidates []matchCandidate
+		for _, result := range globResults {
+			candidates = append(candidates, matchCandidate{
+				fullpath:       result,
+				relative:       strings.TrimPrefix(result, root),
+				splitRemainder: afterSplit,
+			})
+		}
+
+		return candidates
+	}
+
+	// setPlaceholders creates the placeholders for the matched file
+	setPlaceholders := func(candidate matchCandidate, isDir bool) {
+		repl.Set("http.matchers.file.relative", filepath.ToSlash(candidate.relative))
+		repl.Set("http.matchers.file.absolute", filepath.ToSlash(candidate.fullpath))
+		repl.Set("http.matchers.file.remainder", filepath.ToSlash(candidate.splitRemainder))
+
+		fileType := "file"
+		if isDir {
+			fileType = "directory"
+		}
+		repl.Set("http.matchers.file.type", fileType)
+	}
+
+	// match file according to the configured policy
+	switch m.TryPolicy {
+	case "", tryPolicyFirstExist, tryPolicyFirstExistFallback:
+		// maxI is the index of the last try_files item, which the
+		// fallback policy accepts without a stat; -1 disables that
+		maxI := -1
+		if m.TryPolicy == tryPolicyFirstExistFallback {
+			maxI = len(m.TryFiles) - 1
+		}
+
+		for i, pattern := range m.TryFiles {
+			// If the pattern is a status code, emit an error,
+			// which short-circuits the middleware pipeline and
+			// writes an HTTP error response.
+			if err := parseErrorCode(pattern); err != nil {
+				return false, err
+			}
+
+			candidates := makeCandidates(pattern)
+			for _, c := range candidates {
+				// Skip the IO if using fallback policy and it's the latest item
+				if i == maxI {
+					setPlaceholders(c, false)
+
+					return true, nil
+				}
+
+				if info, exists := m.strictFileExists(fileSystem, c.fullpath); exists {
+					setPlaceholders(c, info.IsDir())
+					return true, nil
+				}
+			}
+		}
+
+	case tryPolicyLargestSize:
+		var largestSize int64
+		var largest matchCandidate
+		var largestInfo os.FileInfo
+		for _, pattern := range m.TryFiles {
+			candidates := makeCandidates(pattern)
+			for _, c := range candidates {
+				info, err := fs.Stat(fileSystem, c.fullpath)
+				if err == nil && info.Size() > largestSize {
+					largestSize = info.Size()
+					largest = c
+					largestInfo = info
+				}
+			}
+		}
+		if largestInfo == nil {
+			return false, nil
+		}
+		setPlaceholders(largest, largestInfo.IsDir())
+		return true, nil
+
+	case tryPolicySmallestSize:
+		var smallestSize int64
+		var smallest matchCandidate
+		var smallestInfo os.FileInfo
+		for _, pattern := range m.TryFiles {
+			candidates := makeCandidates(pattern)
+			for _, c := range candidates {
+				info, err := fs.Stat(fileSystem, c.fullpath)
+				// NOTE(review): when candidates are zero bytes, the
+				// smallestSize == 0 clause keeps re-selecting later
+				// zero-size candidates (last wins) — confirm intended
+				if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
+					smallestSize = info.Size()
+					smallest = c
+					smallestInfo = info
+				}
+			}
+		}
+		if smallestInfo == nil {
+			return false, nil
+		}
+		setPlaceholders(smallest, smallestInfo.IsDir())
+		return true, nil
+
+	case tryPolicyMostRecentlyMod:
+		var recent matchCandidate
+		var recentInfo os.FileInfo
+		for _, pattern := range m.TryFiles {
+			candidates := makeCandidates(pattern)
+			for _, c := range candidates {
+				info, err := fs.Stat(fileSystem, c.fullpath)
+				if err == nil &&
+					(recentInfo == nil || info.ModTime().After(recentInfo.ModTime())) {
+					recent = c
+					recentInfo = info
+				}
+			}
+		}
+		if recentInfo == nil {
+			return false, nil
+		}
+		setPlaceholders(recent, recentInfo.IsDir())
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// parseErrorCode checks whether input has the form "=<code>", where
+// <code> is a 3-digit HTTP status number, and if so returns a handler
+// error carrying that status; otherwise it returns nil.
+func parseErrorCode(input string) error {
+	numStr, hasPrefix := strings.CutPrefix(input, "=")
+	if !hasPrefix || numStr == "" {
+		return nil
+	}
+	code, err := strconv.Atoi(numStr)
+	if err != nil || code < 100 || code > 999 {
+		// not a status-code expression after all
+		return nil
+	}
+	return caddyhttp.Error(code, fmt.Errorf("%s", numStr))
+}
+
+// strictFileExists stats file and reports whether it exists AND matches
+// the convention implied by its spelling: a path ending in a forward
+// slash must name a directory, and a path without one must name a
+// regular (non-directory) file.
+func (m MatchFile) strictFileExists(fileSystem fs.FS, file string) (os.FileInfo, bool) {
+	info, err := fs.Stat(fileSystem, file)
+	if err != nil {
+		// the error could be anything: permission problems, or even
+		// obscure ones like "is not a directory" (when statting a
+		// path inside a regular file) — so we cannot be sure whether
+		// the file exists, and treat every error as nonexistence;
+		// see https://stackoverflow.com/a/12518877/1048862
+		return nil, false
+	}
+	wantsDir := strings.HasSuffix(file, separator)
+	return info, wantsDir == info.IsDir()
+}
+
+// firstSplit returns the first result where the path can be split in
+// two by a value in m.SplitPath: the leading piece ending with the
+// split substring, and the remainder. If no split value applies, the
+// whole path is returned with an empty remainder.
+func (m MatchFile) firstSplit(path string) (splitPart, remainder string) {
+	for _, delim := range m.SplitPath {
+		idx := indexFold(path, delim)
+		if idx < 0 {
+			continue
+		}
+		end := idx + len(delim)
+		// the delimiter only counts when it terminates a path
+		// component (i.e. ends the path or precedes a "/")
+		if end == len(path) || strings.HasPrefix(path[end:], "/") {
+			return path[:end], path[end:]
+		}
+	}
+	return path, ""
+}
+
+// indexFold returns the index of the first case-insensitive occurrence
+// of needle in haystack, or -1 if there is none. There is no
+// strings.IndexFold() function like there is strings.EqualFold(), but
+// we can use strings.EqualFold() to build our own case-insensitive
+// substring search (as of Go 1.14).
+func indexFold(haystack, needle string) int {
+	nlen := len(needle)
+	// i+nlen may equal len(haystack) so a needle that ends exactly at
+	// the end of the haystack (including the whole string) is found;
+	// the previous `<` comparison missed those occurrences
+	for i := 0; i+nlen <= len(haystack); i++ {
+		if strings.EqualFold(haystack[i:i+nlen], needle) {
+			return i
+		}
+	}
+	return -1
+}
+
+// isCELTryFilesLiteral returns whether the expression is a map literal
+// whose keys are the string literals "try_files", "split_path",
+// "try_policy", or "root", with values of the appropriate kind
+// (string lists for the first two, string expressions for the rest).
+func isCELTryFilesLiteral(e ast.Expr) bool {
+	switch e.Kind() {
+	case ast.MapKind:
+		mapExpr := e.AsMap()
+		for _, entry := range mapExpr.Entries() {
+			mapKey := entry.AsMapEntry().Key()
+			mapVal := entry.AsMapEntry().Value()
+			if !isCELStringLiteral(mapKey) {
+				return false
+			}
+			mapKeyStr := mapKey.AsLiteral().ConvertToType(types.StringType).Value()
+			if mapKeyStr == "try_files" || mapKeyStr == "split_path" {
+				if !isCELStringListLiteral(mapVal) {
+					return false
+				}
+			} else if mapKeyStr == "try_policy" || mapKeyStr == "root" {
+				if !(isCELStringExpr(mapVal)) {
+					return false
+				}
+			} else {
+				// any other key disqualifies the map
+				return false
+			}
+		}
+		return true
+
+	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind, ast.StructKind:
+		// appeasing the linter :)
+	}
+	return false
+}
+
+// isCELStringExpr indicates whether the expression is a supported
+// string expression: a string literal, a placeholder call, or a
+// concatenation (+) of such expressions.
+func isCELStringExpr(e ast.Expr) bool {
+	return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
+}
+
+// isCELStringLiteral returns whether the expression is a CEL string literal.
+func isCELStringLiteral(e ast.Expr) bool {
+	switch e.Kind() {
+	case ast.LiteralKind:
+		constant := e.AsLiteral()
+		switch constant.Type() {
+		case types.StringType:
+			return true
+		}
+	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+		// appeasing the linter :)
+	}
+	return false
+}
+
+// isCELCaddyPlaceholderCall returns whether the expression is a call to
+// Caddy's placeholder function (i.e. a `{...}` placeholder expansion).
+func isCELCaddyPlaceholderCall(e ast.Expr) bool {
+	switch e.Kind() {
+	case ast.CallKind:
+		call := e.AsCall()
+		if call.FunctionName() == caddyhttp.CELPlaceholderFuncName {
+			return true
+		}
+	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+		// appeasing the linter :)
+	}
+	return false
+}
+
+// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
+// other concat call arguments. The call must be a global (targetless)
+// use of the add operator.
+func isCELConcatCall(e ast.Expr) bool {
+	switch e.Kind() {
+	case ast.CallKind:
+		call := e.AsCall()
+		if call.Target().Kind() != ast.UnspecifiedExprKind {
+			return false
+		}
+		if call.FunctionName() != operators.Add {
+			return false
+		}
+		// every operand must itself be a supported string expression
+		for _, arg := range call.Args() {
+			if !isCELStringExpr(arg) {
+				return false
+			}
+		}
+		return true
+	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+		// appeasing the linter :)
+	}
+	return false
+}
+
+// isCELStringListLiteral returns whether the expression is a list
+// literal whose elements are all supported string expressions
+// (string constants, placeholder calls, or concatenations thereof).
+func isCELStringListLiteral(e ast.Expr) bool {
+	switch e.Kind() {
+	case ast.ListKind:
+		list := e.AsList()
+		for _, elem := range list.Elements() {
+			if !isCELStringExpr(elem) {
+				return false
+			}
+		}
+		return true
+	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
+		// appeasing the linter :)
+	}
+	return false
+}
+
+// globSafeRepl replaces special glob characters with escaped
+// equivalents so that placeholder-derived values are treated
+// literally by fs.Glob. Note that the filepath godoc states that
+// escaping is not done on Windows because of the separator.
+var globSafeRepl = strings.NewReplacer(
+	"*", "\\*",
+	"[", "\\[",
+	"?", "\\?",
+)
+
+// Names of the supported try policies (see MatchFile.TryPolicy).
+const (
+	tryPolicyFirstExist         = "first_exist"
+	tryPolicyFirstExistFallback = "first_exist_fallback"
+	tryPolicyLargestSize        = "largest_size"
+	tryPolicySmallestSize       = "smallest_size"
+	tryPolicyMostRecentlyMod    = "most_recently_modified"
+)
+
+// Interface guards: ensure MatchFile satisfies these at compile time.
+var (
+	_ caddy.Validator                   = (*MatchFile)(nil)
+	_ caddyhttp.RequestMatcherWithError = (*MatchFile)(nil)
+	_ caddyhttp.CELLibraryProducer      = (*MatchFile)(nil)
+)
diff --git a/modules/caddyhttp/fileserver/matcher_test.go b/modules/caddyhttp/fileserver/matcher_test.go
new file mode 100644
index 00000000000..b6697b9d880
--- /dev/null
+++ b/modules/caddyhttp/fileserver/matcher_test.go
@@ -0,0 +1,418 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/internal/filesystems"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// TestFileMatcher exercises MatchFile with the default try_files
+// patterns ({path}, then {path}/) against ./testdata, verifying the
+// match result and the relative-path/file-type placeholders it sets.
+func TestFileMatcher(t *testing.T) {
+	// Windows doesn't like colons in files names
+	isWindows := runtime.GOOS == "windows"
+	if !isWindows {
+		filename := "with:in-name.txt"
+		f, err := os.Create("./testdata/" + filename)
+		if err != nil {
+			t.Fail()
+			return
+		}
+		t.Cleanup(func() {
+			os.Remove("./testdata/" + filename)
+		})
+		// NOTE(review): write/close errors are ignored here; the content
+		// is never read back — only the file's existence matters below.
+		f.WriteString(filename)
+		f.Close()
+	}
+
+	for i, tc := range []struct {
+		path         string // request path as sent (possibly escaped)
+		expectedPath string // expected http.matchers.file.relative value
+		expectedType string // expected http.matchers.file.type value
+		matched      bool
+	}{
+		{
+			path:         "/foo.txt",
+			expectedPath: "/foo.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/foo.txt/",
+			expectedPath: "/foo.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/foo.txt?a=b",
+			expectedPath: "/foo.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/foodir",
+			expectedPath: "/foodir/",
+			expectedType: "directory",
+			matched:      true,
+		},
+		{
+			path:         "/foodir/",
+			expectedPath: "/foodir/",
+			expectedType: "directory",
+			matched:      true,
+		},
+		{
+			path:         "/foodir/foo.txt",
+			expectedPath: "/foodir/foo.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:    "/missingfile.php",
+			matched: false,
+		},
+		{
+			path:         "ملف.txt", // the path file name is not escaped
+			expectedPath: "/ملف.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         url.PathEscape("ملف.txt"), // singly-escaped path
+			expectedPath: "/ملف.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         url.PathEscape(url.PathEscape("ملف.txt")), // doubly-escaped path
+			expectedPath: "/%D9%85%D9%84%D9%81.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "./with:in-name.txt", // browsers send the request with the path as such
+			expectedPath: "/with:in-name.txt",
+			expectedType: "file",
+			matched:      !isWindows,
+		},
+	} {
+		// a fresh matcher per case; default try_files checks the path
+		// itself and then the path as a directory
+		m := &MatchFile{
+			fsmap:    &filesystems.FilesystemMap{},
+			Root:     "./testdata",
+			TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
+		}
+
+		u, err := url.Parse(tc.path)
+		if err != nil {
+			t.Errorf("Test %d: parsing path: %v", i, err)
+		}
+
+		req := &http.Request{URL: u}
+		repl := caddyhttp.NewTestReplacer(req)
+
+		result, err := m.MatchWithError(req)
+		if err != nil {
+			t.Errorf("Test %d: unexpected error: %v", i, err)
+		}
+		if result != tc.matched {
+			t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
+		}
+
+		rel, ok := repl.Get("http.matchers.file.relative")
+		if !ok && result {
+			t.Errorf("Test %d: expected replacer value", i)
+		}
+		// placeholder assertions only make sense when the matcher matched
+		if !result {
+			continue
+		}
+
+		if rel != tc.expectedPath {
+			t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+		}
+
+		fileType, _ := repl.Get("http.matchers.file.type")
+		if fileType != tc.expectedType {
+			t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
+		}
+	}
+}
+
+// TestPHPFileMatcher exercises MatchFile configured like a typical PHP
+// site: try the path itself, then {path}/index.php, with ".php" as the
+// path-splitting suffix (so "/index.php/somewhere" maps to the script).
+func TestPHPFileMatcher(t *testing.T) {
+	for i, tc := range []struct {
+		path         string // request path as sent
+		expectedPath string // expected http.matchers.file.relative value
+		expectedType string // expected http.matchers.file.type value
+		matched      bool
+	}{
+		{
+			path:         "/index.php",
+			expectedPath: "/index.php",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/index.php/somewhere",
+			expectedPath: "/index.php",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/remote.php",
+			expectedPath: "/remote.php",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/remote.php/somewhere",
+			expectedPath: "/remote.php",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:    "/missingfile.php",
+			matched: false,
+		},
+		{
+			path:         "/notphp.php.txt",
+			expectedPath: "/notphp.php.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:         "/notphp.php.txt/",
+			expectedPath: "/notphp.php.txt",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			path:    "/notphp.php.txt.suffixed",
+			matched: false,
+		},
+		{
+			path:         "/foo.php.php/index.php",
+			expectedPath: "/foo.php.php/index.php",
+			expectedType: "file",
+			matched:      true,
+		},
+		{
+			// See https://github.com/caddyserver/caddy/issues/3623
+			path:         "/%E2%C3",
+			expectedPath: "/%E2%C3",
+			expectedType: "file",
+			matched:      false,
+		},
+		{
+			path:         "/index.php?path={path}&{query}",
+			expectedPath: "/index.php",
+			expectedType: "file",
+			matched:      true,
+		},
+	} {
+		m := &MatchFile{
+			fsmap:     &filesystems.FilesystemMap{},
+			Root:      "./testdata",
+			TryFiles:  []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
+			SplitPath: []string{".php"},
+		}
+
+		u, err := url.Parse(tc.path)
+		if err != nil {
+			t.Errorf("Test %d: parsing path: %v", i, err)
+		}
+
+		req := &http.Request{URL: u}
+		repl := caddyhttp.NewTestReplacer(req)
+
+		result, err := m.MatchWithError(req)
+		if err != nil {
+			t.Errorf("Test %d: unexpected error: %v", i, err)
+		}
+		if result != tc.matched {
+			t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
+		}
+
+		rel, ok := repl.Get("http.matchers.file.relative")
+		if !ok && result {
+			t.Errorf("Test %d: expected replacer value", i)
+		}
+		// placeholder assertions only make sense when the matcher matched
+		if !result {
+			continue
+		}
+
+		if rel != tc.expectedPath {
+			t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
+		}
+
+		fileType, _ := repl.Get("http.matchers.file.type")
+		if fileType != tc.expectedType {
+			t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
+		}
+	}
+}
+
+func TestFirstSplit(t *testing.T) {
+ m := MatchFile{
+ SplitPath: []string{".php"},
+ fsmap: &filesystems.FilesystemMap{},
+ }
+ actual, remainder := m.firstSplit("index.PHP/somewhere")
+ expected := "index.PHP"
+ expectedRemainder := "/somewhere"
+ if actual != expected {
+ t.Errorf("Expected split %s but got %s", expected, actual)
+ }
+ if remainder != expectedRemainder {
+ t.Errorf("Expected remainder %s but got %s", expectedRemainder, remainder)
+ }
+}
+
+// expressionTests is the table of CEL `file()` matcher-expression cases
+// consumed by TestMatchExpressionMatch. wantErr covers provisioning
+// failures; wantResult is the expected match outcome; expectedPath,
+// when non-empty, is checked against the file.relative placeholder.
+var expressionTests = []struct {
+	name              string
+	expression        *caddyhttp.MatchExpression
+	urlTarget         string
+	httpMethod        string
+	httpHeader        *http.Header
+	wantErr           bool
+	wantResult        bool
+	clientCertificate []byte // not set by any of the file() cases below
+	expectedPath      string
+}{
+	{
+		name: "file error no args (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file()`,
+		},
+		urlTarget:  "https://example.com/foo.txt",
+		wantResult: true,
+	},
+	{
+		// "try_file" (singular) is not a valid option name; presumably
+		// this should fail during provisioning — TODO confirm where the
+		// error surfaces.
+		name: "file error bad try files (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"try_file": ["bad_arg"]})`,
+		},
+		urlTarget: "https://example.com/foo",
+		wantErr:   true,
+	},
+	{
+		name: "file match short pattern index.php (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file("index.php")`,
+		},
+		urlTarget:  "https://example.com/foo",
+		wantResult: true,
+	},
+	{
+		name: "file match short pattern foo.txt (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({http.request.uri.path})`,
+		},
+		urlTarget:  "https://example.com/foo.txt",
+		wantResult: true,
+	},
+	{
+		name: "file match index.php (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}, "/index.php"]})`,
+		},
+		urlTarget:  "https://example.com/foo",
+		wantResult: true,
+	},
+	{
+		name: "file match long pattern foo.txt (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
+		},
+		urlTarget:  "https://example.com/foo.txt",
+		wantResult: true,
+	},
+	{
+		// exercises string concatenation inside the try_files list
+		name: "file match long pattern foo.txt with concatenation (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"root": ".", "try_files": ["./testdata" + {http.request.uri.path}]})`,
+		},
+		urlTarget:  "https://example.com/foo.txt",
+		wantResult: true,
+	},
+	{
+		name: "file not match long pattern (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
+		},
+		urlTarget:  "https://example.com/nopenope.txt",
+		wantResult: false,
+	},
+	{
+		// largest_size policy should pick large.txt over foo.txt
+		name: "file match long pattern foo.txt with try_policy (MatchFile)",
+		expression: &caddyhttp.MatchExpression{
+			Expr: `file({"root": "./testdata", "try_policy": "largest_size", "try_files": ["foo.txt", "large.txt"]})`,
+		},
+		urlTarget:    "https://example.com/",
+		wantResult:   true,
+		expectedPath: "/large.txt",
+	},
+}
+
+// TestMatchExpressionMatch provisions each expression in
+// expressionTests and evaluates it against a synthetic request,
+// checking the match result and, when expected, the
+// http.matchers.file.relative placeholder.
+func TestMatchExpressionMatch(t *testing.T) {
+	for _, tst := range expressionTests {
+		tc := tst // copy for the subtest closure (pre-Go 1.22 loop-variable semantics)
+		t.Run(tc.name, func(t *testing.T) {
+			caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
+			defer cancel()
+			err := tc.expression.Provision(caddyCtx)
+			if err != nil {
+				// a provisioning error is only acceptable when the case expects one
+				if !tc.wantErr {
+					t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tc.wantErr)
+				}
+				return
+			}
+
+			req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil)
+			if tc.httpHeader != nil {
+				req.Header = *tc.httpHeader
+			}
+			// install a replacer with the site root, as the HTTP server would
+			repl := caddyhttp.NewTestReplacer(req)
+			repl.Set("http.vars.root", "./testdata")
+			ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+			req = req.WithContext(ctx)
+
+			matches, err := tc.expression.MatchWithError(req)
+			if err != nil {
+				t.Errorf("MatchExpression.Match() error = %v", err)
+				return
+			}
+			if matches != tc.wantResult {
+				t.Errorf("MatchExpression.Match() expected to return '%t', for expression : '%s'", tc.wantResult, tc.expression.Expr)
+			}
+
+			if tc.expectedPath != "" {
+				path, ok := repl.Get("http.matchers.file.relative")
+				if !ok {
+					t.Errorf("MatchExpression.Match() expected to return path '%s', but got none", tc.expectedPath)
+				}
+				if path != tc.expectedPath {
+					t.Errorf("MatchExpression.Match() expected to return path '%s', but got '%s'", tc.expectedPath, path)
+				}
+			}
+		})
+	}
+}
diff --git a/modules/caddyhttp/fileserver/staticfiles.go b/modules/caddyhttp/fileserver/staticfiles.go
new file mode 100644
index 00000000000..2b0caecfc99
--- /dev/null
+++ b/modules/caddyhttp/fileserver/staticfiles.go
@@ -0,0 +1,802 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ weakrand "math/rand"
+ "mime"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
+)
+
+// init registers the file_server handler module with Caddy.
+func init() {
+	caddy.RegisterModule(FileServer{})
+}
+
+// FileServer implements a handler that serves static files.
+//
+// The path of the file to serve is constructed by joining the site root
+// and the sanitized request path. Any and all files within the root and
+// links with targets outside the site root may therefore be accessed.
+// For example, with a site root of `/www`, requests to `/foo/bar.txt`
+// will serve the file at `/www/foo/bar.txt`.
+//
+// The request path is sanitized using the Go standard library's
+// path.Clean() function (https://pkg.go.dev/path#Clean) before being
+// joined to the root. Request paths must be valid and well-formed.
+//
+// For requests that access directories instead of regular files,
+// Caddy will attempt to serve an index file if present. For example,
+// a request to `/dir/` will attempt to serve `/dir/index.html` if
+// it exists. The index file names to try are configurable. If a
+// requested directory does not have an index file, Caddy writes a
+// 404 response. Alternatively, file browsing can be enabled with
+// the "browse" parameter which shows a list of files when directories
+// are requested if no index file is present. If "browse" is enabled,
+// Caddy may serve a JSON array of the directory listing when the `Accept`
+// header mentions `application/json` with the following structure:
+//
+//	[{
+//		"name": "",
+//		"size": 0,
+//		"url": "",
+//		"mod_time": "",
+//		"mode": 0,
+//		"is_dir": false,
+//		"is_symlink": false
+//	}]
+//
+// with the `url` being relative to the request path and `mod_time` in the RFC 3339 format
+// with sub-second precision. For any other value for the `Accept` header, the
+// respective browse template is executed with `Content-Type: text/html`.
+//
+// By default, this handler will canonicalize URIs so that requests to
+// directories end with a slash, but requests to regular files do not.
+// This is enforced with HTTP redirects automatically and can be disabled.
+// Canonicalization redirects are not issued, however, if a URI rewrite
+// modified the last component of the path (the filename).
+//
+// This handler sets the Etag and Last-Modified headers for static files.
+// It does not perform MIME sniffing to determine Content-Type based on
+// contents, but does use the extension (if known); see the Go docs for
+// details: https://pkg.go.dev/mime#TypeByExtension
+//
+// The file server properly handles requests with If-Match,
+// If-Unmodified-Since, If-Modified-Since, If-None-Match, Range, and
+// If-Range headers. It includes the file's modification time in the
+// Last-Modified header of the response.
+type FileServer struct {
+	// The file system implementation to use. By default, Caddy uses the local
+	// disk file system.
+	//
+	// If a non-default filesystem is used, it must first be registered in the
+	// globals section.
+	FileSystem string `json:"fs,omitempty"`
+
+	// The path to the root of the site. Default is `{http.vars.root}` if set,
+	// or current working directory otherwise. This should be a trusted value.
+	//
+	// Note that a site root is not a sandbox. Although the file server does
+	// sanitize the request URI to prevent directory traversal, files (including
+	// links) within the site root may be directly accessed based on the request
+	// path. Files and folders within the root should be secure and trustworthy.
+	Root string `json:"root,omitempty"`
+
+	// A list of files or folders to hide; the file server will pretend as if
+	// they don't exist. Accepts globular patterns like `*.ext` or `/foo/*/bar`
+	// as well as placeholders. Because site roots can be dynamic, this list
+	// uses file system paths, not request paths. To clarify, the base of
+	// relative paths is the current working directory, NOT the site root.
+	//
+	// Entries without a path separator (`/` or `\` depending on OS) will match
+	// any file or directory of that name regardless of its path. To hide only a
+	// specific file with a name that may not be unique, always use a path
+	// separator. For example, to hide all files or folder trees named "hidden",
+	// put "hidden" in the list. To hide only ./hidden, put "./hidden" in the list.
+	//
+	// When possible, all paths are resolved to their absolute form before
+	// comparisons are made. For maximum clarity and explicitness, use complete,
+	// absolute paths; or, for greater portability, use relative paths instead.
+	Hide []string `json:"hide,omitempty"`
+
+	// The names of files to try as index files if a folder is requested.
+	// Default: index.html, index.txt.
+	IndexNames []string `json:"index_names,omitempty"`
+
+	// Enables file listings if a directory was requested and no index
+	// file is present.
+	Browse *Browse `json:"browse,omitempty"`
+
+	// Use redirects to enforce trailing slashes for directories, or to
+	// remove trailing slash from URIs for files. Default is true.
+	//
+	// Canonicalization will not happen if the last element of the request's
+	// path (the filename) is changed in an internal rewrite, to avoid
+	// clobbering the explicit rewrite with implicit behavior.
+	CanonicalURIs *bool `json:"canonical_uris,omitempty"`
+
+	// Override the status code written when successfully serving a file.
+	// Particularly useful when explicitly serving a file as display for
+	// an error, like a 404 page. A placeholder may be used. By default,
+	// the status code will typically be 200, or 206 for partial content.
+	StatusCode caddyhttp.WeakString `json:"status_code,omitempty"`
+
+	// If pass-thru mode is enabled and a requested file is not found,
+	// it will invoke the next handler in the chain instead of returning
+	// a 404 error. By default, this is false (disabled).
+	PassThru bool `json:"pass_thru,omitempty"`
+
+	// Selection of encoders to use to check for precompressed files.
+	PrecompressedRaw caddy.ModuleMap `json:"precompressed,omitempty" caddy:"namespace=http.precompressed"`
+
+	// If the client has no strong preference (q-factor), choose these encodings in order.
+	// If no order specified here, the first encoding from the Accept-Encoding header
+	// that both client and server support is used.
+	PrecompressedOrder []string `json:"precompressed_order,omitempty"`
+
+	// precompressors maps an Accept-Encoding value to its precompressor
+	// module; populated from PrecompressedRaw during Provision.
+	precompressors map[string]encode.Precompressed
+
+	// List of file extensions to try to read Etags from.
+	// If set, file Etags will be read from sidecar files
+	// with any of these suffixes, instead of generating
+	// our own Etag.
+	EtagFileExtensions []string `json:"etag_file_extensions,omitempty"`
+
+	// fsmap resolves file system names to fs.FS instances; set in Provision.
+	fsmap caddy.FileSystems
+
+	logger *zap.Logger
+}
+
+// CaddyModule returns the Caddy module information. The ID places this
+// handler in the http.handlers namespace as "file_server".
+func (FileServer) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "http.handlers.file_server",
+		New: func() caddy.Module { return new(FileServer) },
+	}
+}
+
+// Provision sets up the static files responder: it applies defaults
+// for the file system, root, and index names; pre-resolves static hide
+// paths; loads the precompression encoder modules; and validates the
+// browse sort options. It returns an error if any module fails to load
+// or any configured option is invalid.
+func (fsrv *FileServer) Provision(ctx caddy.Context) error {
+	fsrv.logger = ctx.Logger()
+
+	fsrv.fsmap = ctx.Filesystems()
+
+	if fsrv.FileSystem == "" {
+		fsrv.FileSystem = "{http.vars.fs}"
+	}
+
+	if fsrv.Root == "" {
+		fsrv.Root = "{http.vars.root}"
+	}
+
+	if fsrv.IndexNames == nil {
+		fsrv.IndexNames = defaultIndexNames
+	}
+
+	// for hide paths that are static (i.e. no placeholders), we can transform them into
+	// absolute paths before the server starts for very slight performance improvement
+	for i, h := range fsrv.Hide {
+		if !strings.Contains(h, "{") && strings.Contains(h, separator) {
+			if abs, err := caddy.FastAbs(h); err == nil {
+				fsrv.Hide[i] = abs
+			}
+		}
+	}
+
+	// support precompressed sidecar files
+	mods, err := ctx.LoadModule(fsrv, "PrecompressedRaw")
+	if err != nil {
+		return fmt.Errorf("loading encoder modules: %v", err)
+	}
+	for modName, modIface := range mods.(map[string]any) {
+		p, ok := modIface.(encode.Precompressed)
+		if !ok {
+			return fmt.Errorf("module %s is not precompressor", modName)
+		}
+		// each precompressor must declare both its Accept-Encoding
+		// token and its file suffix, and tokens must be unique
+		ae := p.AcceptEncoding()
+		if ae == "" {
+			return fmt.Errorf("precompressor does not specify an Accept-Encoding value")
+		}
+		suffix := p.Suffix()
+		if suffix == "" {
+			return fmt.Errorf("precompressor does not specify a Suffix value")
+		}
+		if _, ok := fsrv.precompressors[ae]; ok {
+			return fmt.Errorf("precompressor already added: %s", ae)
+		}
+		// lazily allocate the map on first valid precompressor
+		if fsrv.precompressors == nil {
+			fsrv.precompressors = make(map[string]encode.Precompressed)
+		}
+		fsrv.precompressors[ae] = p
+	}
+
+	if fsrv.Browse != nil {
+		// check sort options: at most [field, order]
+		for idx, sortOption := range fsrv.Browse.SortOptions {
+			switch idx {
+			case 0:
+				if sortOption != sortByName && sortOption != sortByNameDirFirst && sortOption != sortBySize && sortOption != sortByTime {
+					return fmt.Errorf("the first option must be one of the following: %s, %s, %s, %s, but got %s", sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOption)
+				}
+			case 1:
+				if sortOption != sortOrderAsc && sortOption != sortOrderDesc {
+					return fmt.Errorf("the second option must be one of the following: %s, %s, but got %s", sortOrderAsc, sortOrderDesc, sortOption)
+				}
+			default:
+				return fmt.Errorf("only max 2 sort options are allowed, but got %d", idx+1)
+			}
+		}
+	}
+
+	return nil
+}
+
+// ServeHTTP resolves the requested file within the configured file
+// system and site root, applying (in order): Windows path hardening,
+// hidden-file checks, index-file resolution, directory browsing,
+// canonical-URI redirects, precompressed sidecar selection, and
+// Etag/Content-Type headers, before delegating the actual transfer
+// (including Range and conditional requests) to http.ServeContent.
+// Not-found results are delegated to fsrv.notFound, which honors
+// pass-thru mode.
+func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	if runtime.GOOS == "windows" {
+		// reject paths with Alternate Data Streams (ADS)
+		if strings.Contains(r.URL.Path, ":") {
+			return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal ADS path"))
+		}
+		// reject paths with "8.3" short names
+		trimmedPath := strings.TrimRight(r.URL.Path, ". ") // Windows ignores trailing dots and spaces, sigh
+		if len(path.Base(trimmedPath)) <= 12 && strings.Contains(trimmedPath, "~") {
+			return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal short name"))
+		}
+		// both of those could bypass file hiding or possibly leak information even if the file is not hidden
+	}
+
+	filesToHide := fsrv.transformHidePaths(repl)
+
+	root := repl.ReplaceAll(fsrv.Root, ".")
+	fsName := repl.ReplaceAll(fsrv.FileSystem, "")
+
+	fileSystem, ok := fsrv.fsmap.Get(fsName)
+	if !ok {
+		return caddyhttp.Error(http.StatusNotFound, fmt.Errorf("filesystem not found"))
+	}
+
+	// remove any trailing `/` as it breaks fs.ValidPath() in the stdlib
+	filename := strings.TrimSuffix(caddyhttp.SanitizedPathJoin(root, r.URL.Path), "/")
+
+	if c := fsrv.logger.Check(zapcore.DebugLevel, "sanitized path join"); c != nil {
+		c.Write(
+			zap.String("site_root", root),
+			zap.String("fs", fsName),
+			zap.String("request_path", r.URL.Path),
+			zap.String("result", filename),
+		)
+	}
+
+	// get information about the file
+	info, err := fs.Stat(fileSystem, filename)
+	if err != nil {
+		err = fsrv.mapDirOpenError(fileSystem, err, filename)
+		if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrInvalid) {
+			return fsrv.notFound(w, r, next)
+		} else if errors.Is(err, fs.ErrPermission) {
+			return caddyhttp.Error(http.StatusForbidden, err)
+		}
+		return caddyhttp.Error(http.StatusInternalServerError, err)
+	}
+
+	// if the request mapped to a directory, see if
+	// there is an index file we can serve
+	var implicitIndexFile bool
+	if info.IsDir() && len(fsrv.IndexNames) > 0 {
+		for _, indexPage := range fsrv.IndexNames {
+			indexPage := repl.ReplaceAll(indexPage, "")
+			indexPath := caddyhttp.SanitizedPathJoin(filename, indexPage)
+			if fileHidden(indexPath, filesToHide) {
+				// pretend this file doesn't exist
+				if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding index file"); c != nil {
+					c.Write(
+						zap.String("filename", indexPath),
+						zap.Strings("files_to_hide", filesToHide),
+					)
+				}
+				continue
+			}
+
+			indexInfo, err := fs.Stat(fileSystem, indexPath)
+			if err != nil {
+				continue
+			}
+
+			// don't rewrite the request path to append
+			// the index file, because we might need to
+			// do a canonical-URL redirect below based
+			// on the URL as-is
+
+			// we've chosen to use this index file,
+			// so replace the last file info and path
+			// with that of the index file
+			info = indexInfo
+			filename = indexPath
+			implicitIndexFile = true
+			if c := fsrv.logger.Check(zapcore.DebugLevel, "located index file"); c != nil {
+				c.Write(zap.String("filename", filename))
+			}
+			break
+		}
+	}
+
+	// if still referencing a directory, delegate
+	// to browse or return an error
+	if info.IsDir() {
+		if c := fsrv.logger.Check(zapcore.DebugLevel, "no index file in directory"); c != nil {
+			c.Write(
+				zap.String("path", filename),
+				zap.Strings("index_filenames", fsrv.IndexNames),
+			)
+		}
+		if fsrv.Browse != nil && !fileHidden(filename, filesToHide) {
+			return fsrv.serveBrowse(fileSystem, root, filename, w, r, next)
+		}
+		return fsrv.notFound(w, r, next)
+	}
+
+	// one last check to ensure the file isn't hidden (we might
+	// have changed the filename from when we last checked)
+	if fileHidden(filename, filesToHide) {
+		if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding file"); c != nil {
+			c.Write(
+				zap.String("filename", filename),
+				zap.Strings("files_to_hide", filesToHide),
+			)
+		}
+		return fsrv.notFound(w, r, next)
+	}
+
+	// if URL canonicalization is enabled, we need to enforce trailing
+	// slash convention: if a directory, trailing slash; if a file, no
+	// trailing slash - not enforcing this can break relative hrefs
+	// in HTML (see https://github.com/caddyserver/caddy/issues/2741)
+	if fsrv.CanonicalURIs == nil || *fsrv.CanonicalURIs {
+		// Only redirect if the last element of the path (the filename) was not
+		// rewritten; if the admin wanted to rewrite to the canonical path, they
+		// would have, and we have to be very careful not to introduce unwanted
+		// redirects and especially redirect loops!
+		// See https://github.com/caddyserver/caddy/issues/4205.
+		origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
+		if path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
+			if implicitIndexFile && !strings.HasSuffix(origReq.URL.Path, "/") {
+				to := origReq.URL.Path + "/"
+				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (adding trailing slash for directory"); c != nil {
+					c.Write(
+						zap.String("from_path", origReq.URL.Path),
+						zap.String("to_path", to),
+					)
+				}
+				return redirect(w, r, to)
+			} else if !implicitIndexFile && strings.HasSuffix(origReq.URL.Path, "/") {
+				to := origReq.URL.Path[:len(origReq.URL.Path)-1]
+				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (removing trailing slash for file"); c != nil {
+					c.Write(
+						zap.String("from_path", origReq.URL.Path),
+						zap.String("to_path", to),
+					)
+				}
+				return redirect(w, r, to)
+			}
+		}
+	}
+
+	var file fs.File
+	respHeader := w.Header()
+
+	// etag is usually unset, but if the user knows what they're doing, let them override it
+	etag := respHeader.Get("Etag")
+
+	// static file responses are often compressed, either on-the-fly
+	// or with precompressed sidecar files; in any case, the headers
+	// should contain "Vary: Accept-Encoding" even when not compressed
+	// so caches can craft a reliable key (according to REDbot results)
+	// see #5849
+	respHeader.Add("Vary", "Accept-Encoding")
+
+	// check for precompressed files
+	for _, ae := range encode.AcceptedEncodings(r, fsrv.PrecompressedOrder) {
+		precompress, ok := fsrv.precompressors[ae]
+		if !ok {
+			continue
+		}
+		compressedFilename := filename + precompress.Suffix()
+		compressedInfo, err := fs.Stat(fileSystem, compressedFilename)
+		if err != nil || compressedInfo.IsDir() {
+			if c := fsrv.logger.Check(zapcore.DebugLevel, "precompressed file not accessible"); c != nil {
+				c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+			}
+			continue
+		}
+		if c := fsrv.logger.Check(zapcore.DebugLevel, "opening compressed sidecar file"); c != nil {
+			c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+		}
+		file, err = fsrv.openFile(fileSystem, compressedFilename, w)
+		if err != nil {
+			if c := fsrv.logger.Check(zapcore.WarnLevel, "opening precompressed file failed"); c != nil {
+				c.Write(zap.String("filename", compressedFilename), zap.Error(err))
+			}
+			// 503 means the server is overloaded (out of file descriptors,
+			// per openFile); don't keep trying other encodings
+			if caddyErr, ok := err.(caddyhttp.HandlerError); ok && caddyErr.StatusCode == http.StatusServiceUnavailable {
+				return err
+			}
+			file = nil
+			continue
+		}
+		defer file.Close()
+		respHeader.Set("Content-Encoding", ae)
+		respHeader.Del("Accept-Ranges")
+
+		// try to get the etag from pre computed files if an etag suffix list was provided
+		if etag == "" && fsrv.EtagFileExtensions != nil {
+			etag, err = fsrv.getEtagFromFile(fileSystem, compressedFilename)
+			if err != nil {
+				return err
+			}
+		}
+
+		// don't assign info = compressedInfo because sidecars are kind
+		// of transparent; however we do need to set the Etag:
+		// https://caddy.community/t/gzipped-sidecar-file-wrong-same-etag/16793
+		if etag == "" {
+			etag = calculateEtag(compressedInfo)
+		}
+
+		break
+	}
+
+	// no precompressed file found, use the actual file
+	if file == nil {
+		if c := fsrv.logger.Check(zapcore.DebugLevel, "opening file"); c != nil {
+			c.Write(zap.String("filename", filename))
+		}
+
+		// open the file
+		file, err = fsrv.openFile(fileSystem, filename, w)
+		if err != nil {
+			if herr, ok := err.(caddyhttp.HandlerError); ok &&
+				herr.StatusCode == http.StatusNotFound {
+				return fsrv.notFound(w, r, next)
+			}
+			return err // error is already structured
+		}
+		defer file.Close()
+		// try to get the etag from pre computed files if an etag suffix list was provided
+		if etag == "" && fsrv.EtagFileExtensions != nil {
+			etag, err = fsrv.getEtagFromFile(fileSystem, filename)
+			if err != nil {
+				return err
+			}
+		}
+		if etag == "" {
+			etag = calculateEtag(info)
+		}
+	}
+
+	// at this point, we're serving a file; Go std lib supports only
+	// GET and HEAD, which is sensible for a static file server - reject
+	// any other methods (see issue #5166)
+	if r.Method != http.MethodGet && r.Method != http.MethodHead {
+		// if we're in an error context, then it doesn't make sense
+		// to repeat the error; just continue because we're probably
+		// trying to write an error page response (see issue #5703)
+		if _, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); !ok {
+			respHeader.Add("Allow", "GET, HEAD")
+			return caddyhttp.Error(http.StatusMethodNotAllowed, nil)
+		}
+	}
+
+	// set the Etag - note that a conditional If-None-Match request is handled
+	// by http.ServeContent below, which checks against this Etag value
+	if etag != "" {
+		respHeader.Set("Etag", etag)
+	}
+
+	if respHeader.Get("Content-Type") == "" {
+		mtyp := mime.TypeByExtension(filepath.Ext(filename))
+		if mtyp == "" {
+			// do not allow Go to sniff the content-type; see https://www.youtube.com/watch?v=8t8JYpt0egE
+			respHeader["Content-Type"] = nil
+		} else {
+			respHeader.Set("Content-Type", mtyp)
+		}
+	}
+
+	var statusCodeOverride int
+
+	// if this handler exists in an error context (i.e. is part of a
+	// handler chain that is supposed to handle a previous error),
+	// we should set status code to the one from the error instead
+	// of letting http.ServeContent set the default (usually 200)
+	if reqErr, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); ok {
+		statusCodeOverride = http.StatusInternalServerError
+		if handlerErr, ok := reqErr.(caddyhttp.HandlerError); ok {
+			if handlerErr.StatusCode > 0 {
+				statusCodeOverride = handlerErr.StatusCode
+			}
+		}
+	}
+
+	// if a status code override is configured, run the replacer on it
+	// (an explicitly configured status code takes precedence over the
+	// error-context code derived above)
+	if codeStr := fsrv.StatusCode.String(); codeStr != "" {
+		statusCodeOverride, err = strconv.Atoi(repl.ReplaceAll(codeStr, ""))
+		if err != nil {
+			return caddyhttp.Error(http.StatusInternalServerError, err)
+		}
+	}
+
+	// if we do have an override from the previous two parts, then
+	// we wrap the response writer to intercept the WriteHeader call
+	if statusCodeOverride > 0 {
+		w = statusOverrideResponseWriter{ResponseWriter: w, code: statusCodeOverride}
+	}
+
+	// let the standard library do what it does best; note, however,
+	// that errors generated by ServeContent are written immediately
+	// to the response, so we cannot handle them (but errors there
+	// are rare)
+	http.ServeContent(w, r, info.Name(), info.ModTime(), file.(io.ReadSeeker))
+
+	return nil
+}
+
+// openFile opens the file at the given filename. If there was an error,
+// the response is configured to inform the client how to best handle it
+// and a well-described handler error is returned (do not wrap the
+// returned error value). Not-exist maps to 404, permission to 403, and
+// any other open error to 503 with a Retry-After header.
+func (fsrv *FileServer) openFile(fileSystem fs.FS, filename string, w http.ResponseWriter) (fs.File, error) {
+	file, err := fileSystem.Open(filename)
+	if err != nil {
+		err = fsrv.mapDirOpenError(fileSystem, err, filename)
+		if errors.Is(err, fs.ErrNotExist) {
+			if c := fsrv.logger.Check(zapcore.DebugLevel, "file not found"); c != nil {
+				c.Write(zap.String("filename", filename), zap.Error(err))
+			}
+			return nil, caddyhttp.Error(http.StatusNotFound, err)
+		} else if errors.Is(err, fs.ErrPermission) {
+			if c := fsrv.logger.Check(zapcore.DebugLevel, "permission denied"); c != nil {
+				c.Write(zap.String("filename", filename), zap.Error(err))
+			}
+			return nil, caddyhttp.Error(http.StatusForbidden, err)
+		}
+		// maybe the server is under load and ran out of file descriptors?
+		// have client wait arbitrary seconds to help prevent a stampede
+		// (minBackoff/maxBackoff are package-level bounds defined elsewhere)
+		//nolint:gosec
+		backoff := weakrand.Intn(maxBackoff-minBackoff) + minBackoff
+		w.Header().Set("Retry-After", strconv.Itoa(backoff))
+		if c := fsrv.logger.Check(zapcore.DebugLevel, "retry after backoff"); c != nil {
+			c.Write(zap.String("filename", filename), zap.Int("backoff", backoff), zap.Error(err))
+		}
+		return nil, caddyhttp.Error(http.StatusServiceUnavailable, err)
+	}
+	return file, nil
+}
+
+// mapDirOpenError maps the provided non-nil error from opening name
+// to a possibly better non-nil error. In particular, it turns OS-specific errors
+// about opening files in non-directories into os.ErrNotExist. See golang/go#18984.
+// Adapted from the Go standard library; originally written by Nathaniel Caza.
+// https://go-review.googlesource.com/c/go/+/36635/
+// https://go-review.googlesource.com/c/go/+/36804/
+func (fsrv *FileServer) mapDirOpenError(fileSystem fs.FS, originalErr error, name string) error {
+	// these two classes are already well-classified; nothing to improve
+	if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
+		return originalErr
+	}
+
+	// walk the path from the root, stat'ing each ancestor in turn; if
+	// any ancestor turns out to be a regular file, the open failed
+	// because the path traverses through a non-directory, which we
+	// report uniformly as fs.ErrNotExist
+	parts := strings.Split(name, separator)
+	for i := range parts {
+		if parts[i] == "" {
+			continue // skip empty components (leading or doubled separators)
+		}
+		fi, err := fs.Stat(fileSystem, strings.Join(parts[:i+1], separator))
+		if err != nil {
+			return originalErr // cannot classify further; keep the original error
+		}
+		if !fi.IsDir() {
+			return fs.ErrNotExist
+		}
+	}
+
+	return originalErr
+}
+
+// transformHidePaths performs replacements for all the elements of fsrv.Hide and
+// makes them absolute paths (if they contain a path separator), then returns a
+// new list of the transformed values.
+func (fsrv *FileServer) transformHidePaths(repl *caddy.Replacer) []string {
+	hide := make([]string, len(fsrv.Hide))
+	for i := range fsrv.Hide {
+		// expand placeholders first, since they may expand into a path
+		hide[i] = repl.ReplaceAll(fsrv.Hide[i], "")
+		// only values that look like paths (contain a separator) are made
+		// absolute; bare names are left untouched so fileHidden can match
+		// them against individual path components
+		if strings.Contains(hide[i], separator) {
+			abs, err := caddy.FastAbs(hide[i])
+			if err == nil {
+				hide[i] = abs
+			}
+		}
+	}
+	return hide
+}
+
+// fileHidden returns true if filename is hidden according to the hide list.
+// filename must be a relative or absolute file system path, not a request
+// URI path. It is expected that all the paths in the hide list are absolute
+// paths or are singular filenames (without a path separator).
+func fileHidden(filename string, hide []string) bool {
+	if len(hide) == 0 {
+		return false
+	}
+
+	// all path comparisons use the complete absolute path if possible
+	filenameAbs, err := caddy.FastAbs(filename)
+	if err == nil {
+		filename = filenameAbs
+	}
+
+	// computed lazily below: the filename is only split into components
+	// if at least one hide value is a bare name (has no separator)
+	var components []string
+
+	for _, h := range hide {
+		if !strings.Contains(h, separator) {
+			// if there is no separator in h, then we assume the user
+			// wants to hide any files or folders that match that
+			// name; thus we have to compare against each component
+			// of the filename, e.g. hiding "bar" would hide "/bar"
+			// as well as "/foo/bar/baz" but not "/barstool".
+			if len(components) == 0 {
+				components = strings.Split(filename, separator)
+			}
+			for _, c := range components {
+				if hidden, _ := filepath.Match(h, c); hidden {
+					return true
+				}
+			}
+		} else if strings.HasPrefix(filename, h) {
+			// if there is a separator in h, and filename is exactly
+			// prefixed with h, then we can do a prefix match so that
+			// "/foo" matches "/foo/bar" but not "/foobar".
+			withoutPrefix := strings.TrimPrefix(filename, h)
+			if strings.HasPrefix(withoutPrefix, separator) {
+				return true
+			}
+		}
+
+		// in the general case, a glob match will suffice
+		if hidden, _ := filepath.Match(h, filename); hidden {
+			return true
+		}
+	}
+
+	return false
+}
+
+// notFound returns a 404 error or, if pass-thru is enabled,
+// it calls the next handler in the chain.
+func (fsrv *FileServer) notFound(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+	if fsrv.PassThru {
+		// give a later handler a chance to satisfy the request
+		return next.ServeHTTP(w, r)
+	}
+	return caddyhttp.Error(http.StatusNotFound, nil)
+}
+
+// calculateEtag computes an entity tag using a strong validator
+// without consuming the contents of the file. It requires the
+// file info contain the correct size and modification time.
+// It strives to implement the semantics regarding ETags as defined
+// by RFC 9110 section 8.8.3 and 8.8.1. See
+// https://www.rfc-editor.org/rfc/rfc9110.html#section-8.8.3.
+//
+// As our implementation uses file modification timestamp and size,
+// note the following from RFC 9110 section 8.8.1: "A representation's
+// modification time, if defined with only one-second resolution,
+// might be a weak validator if it is possible for the representation to
+// be modified twice during a single second and retrieved between those
+// modifications." The ext4 file system, which underpins the vast majority
+// of Caddy deployments, stores mod times with millisecond precision,
+// which we consider precise enough to qualify as a strong validator.
+func calculateEtag(d os.FileInfo) string {
+	mtime := d.ModTime()
+	// mod times of 0 or 1 Unix seconds are common placeholder values
+	// (e.g. from build tooling), so a tag derived from them would not
+	// be a useful validator
+	if mtimeUnix := mtime.Unix(); mtimeUnix == 0 || mtimeUnix == 1 {
+		return "" // not useful anyway; see issue #5548
+	}
+	// tag format: "<mtime-ns><size>", both numbers rendered in base 36
+	// for a compact alphanumeric encoding, wrapped in the required quotes
+	var sb strings.Builder
+	sb.WriteRune('"')
+	sb.WriteString(strconv.FormatInt(mtime.UnixNano(), 36))
+	sb.WriteString(strconv.FormatInt(d.Size(), 36))
+	sb.WriteRune('"')
+	return sb.String()
+}
+
+// Finds the first corresponding etag file for a given file in the file system and return its content
+// (a sidecar file named "<filename><suffix>" for each configured suffix, tried in order).
+// Returns "" with a nil error if no sidecar etag file exists.
+func (fsrv *FileServer) getEtagFromFile(fileSystem fs.FS, filename string) (string, error) {
+	for _, suffix := range fsrv.EtagFileExtensions {
+		etagFilename := filename + suffix
+		etag, err := fs.ReadFile(fileSystem, etagFilename)
+		if errors.Is(err, fs.ErrNotExist) {
+			continue // no sidecar file with this suffix; try the next one
+		}
+		if err != nil {
+			return "", fmt.Errorf("cannot read etag from file %s: %v", etagFilename, err)
+		}
+
+		// Etags should not contain newline characters
+		etag = bytes.ReplaceAll(etag, []byte("\n"), []byte{})
+
+		return string(etag), nil
+	}
+	// no sidecar etag file found; not an error
+	return "", nil
+}
+
+// redirect performs a redirect to a given path. The 'toPath' parameter
+// MUST be solely a path, and MUST NOT include a query.
+func redirect(w http.ResponseWriter, r *http.Request, toPath string) error {
+	for strings.HasPrefix(toPath, "//") {
+		// prevent path-based open redirects
+		// (a Location starting with "//" is scheme-relative and would
+		// send the client to another host, so collapse leading slashes
+		// one at a time until only one remains)
+		toPath = strings.TrimPrefix(toPath, "/")
+	}
+	// preserve the query string if present
+	if r.URL.RawQuery != "" {
+		toPath += "?" + r.URL.RawQuery
+	}
+	http.Redirect(w, r, toPath, http.StatusPermanentRedirect)
+	return nil
+}
+
+// statusOverrideResponseWriter intercepts WriteHeader calls
+// to instead write the HTTP status code we want instead
+// of the one http.ServeContent will use by default (usually 200)
+type statusOverrideResponseWriter struct {
+	http.ResponseWriter
+	code int
+}
+
+// WriteHeader intercepts calls by the stdlib to WriteHeader
+// to instead write the HTTP status code we want.
+// The caller's status argument is deliberately discarded.
+func (wr statusOverrideResponseWriter) WriteHeader(int) {
+	wr.ResponseWriter.WriteHeader(wr.code)
+}
+
+// Unwrap returns the underlying ResponseWriter, necessary for
+// http.ResponseController to work correctly.
+func (wr statusOverrideResponseWriter) Unwrap() http.ResponseWriter {
+	return wr.ResponseWriter
+}
+
+// defaultIndexNames are the default index filenames — presumably tried
+// in order when none are configured; confirm against the FileServer
+// provisioning logic (not visible in this chunk).
+var defaultIndexNames = []string{"index.html", "index.txt"}
+
+const (
+	// bounds, in seconds, of the randomized Retry-After value set by
+	// openFile when opening a file fails for transient reasons
+	minBackoff, maxBackoff = 2, 5
+
+	// the OS-specific path separator, as a string for easy split/join
+	separator = string(filepath.Separator)
+)
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*FileServer)(nil)
+ _ caddyhttp.MiddlewareHandler = (*FileServer)(nil)
+)
diff --git a/modules/caddyhttp/fileserver/staticfiles_test.go b/modules/caddyhttp/fileserver/staticfiles_test.go
new file mode 100644
index 00000000000..5d6133c731d
--- /dev/null
+++ b/modules/caddyhttp/fileserver/staticfiles_test.go
@@ -0,0 +1,130 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileserver
+
+import (
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TestFileHidden exercises fileHidden with bare names (component
+// matching), absolute path prefixes, and glob patterns.
+func TestFileHidden(t *testing.T) {
+	for i, tc := range []struct {
+		inputHide []string
+		inputPath string
+		expect bool
+	}{
+		{
+			inputHide: nil,
+			inputPath: "",
+			expect: false,
+		},
+		{
+			inputHide: []string{".gitignore"},
+			inputPath: "/.gitignore",
+			expect: true,
+		},
+		{
+			inputHide: []string{".git"},
+			inputPath: "/.gitignore",
+			expect: false,
+		},
+		{
+			inputHide: []string{"/.git"},
+			inputPath: "/.gitignore",
+			expect: false,
+		},
+		{
+			inputHide: []string{".git"},
+			inputPath: "/.git",
+			expect: true,
+		},
+		{
+			inputHide: []string{".git"},
+			inputPath: "/.git/foo",
+			expect: true,
+		},
+		{
+			inputHide: []string{".git"},
+			inputPath: "/foo/.git/bar",
+			expect: true,
+		},
+		{
+			inputHide: []string{"/prefix"},
+			inputPath: "/prefix/foo",
+			expect: true,
+		},
+		{
+			inputHide: []string{"/foo/*/bar"},
+			inputPath: "/foo/asdf/bar",
+			expect: true,
+		},
+		{
+			inputHide: []string{"*.txt"},
+			inputPath: "/foo/bar.txt",
+			expect: true,
+		},
+		{
+			inputHide: []string{"/foo/bar/*.txt"},
+			inputPath: "/foo/bar/baz.txt",
+			expect: true,
+		},
+		{
+			inputHide: []string{"/foo/bar/*.txt"},
+			inputPath: "/foo/bar.txt",
+			expect: false,
+		},
+		{
+			inputHide: []string{"/foo/bar/*.txt"},
+			inputPath: "/foo/bar/index.html",
+			expect: false,
+		},
+		{
+			inputHide: []string{"/foo"},
+			inputPath: "/foo",
+			expect: true,
+		},
+		{
+			inputHide: []string{"/foo"},
+			inputPath: "/foobar",
+			expect: false,
+		},
+		{
+			inputHide: []string{"first", "second"},
+			inputPath: "/second",
+			expect: true,
+		},
+	} {
+		// on Windows, convert the Unix-style fixtures above into native
+		// absolute paths so fileHidden's separator-based logic applies
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(tc.inputPath, "/") {
+				tc.inputPath, _ = filepath.Abs(tc.inputPath)
+			}
+			tc.inputPath = filepath.FromSlash(tc.inputPath)
+			for i := range tc.inputHide {
+				if strings.HasPrefix(tc.inputHide[i], "/") {
+					tc.inputHide[i], _ = filepath.Abs(tc.inputHide[i])
+				}
+				tc.inputHide[i] = filepath.FromSlash(tc.inputHide[i])
+			}
+		}
+
+		actual := fileHidden(tc.inputPath, tc.inputHide)
+		if actual != tc.expect {
+			t.Errorf("Test %d: Does %v hide %s? Got %t but expected %t",
+				i, tc.inputHide, tc.inputPath, actual, tc.expect)
+		}
+	}
+}
diff --git a/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt b/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt
new file mode 100644
index 00000000000..0f4bf1a9903
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt
@@ -0,0 +1 @@
+%D9%85%D9%84%D9%81.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foo.php.php/index.php b/modules/caddyhttp/fileserver/testdata/foo.php.php/index.php
new file mode 100644
index 00000000000..4a2ac6b2f75
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foo.php.php/index.php
@@ -0,0 +1 @@
+foo.php.php/index.php
diff --git a/modules/caddyhttp/fileserver/testdata/foo.txt b/modules/caddyhttp/fileserver/testdata/foo.txt
new file mode 100644
index 00000000000..996f1789ff6
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foo.txt
@@ -0,0 +1 @@
+foo.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foodir/bar.txt b/modules/caddyhttp/fileserver/testdata/foodir/bar.txt
new file mode 100644
index 00000000000..df34bd20398
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foodir/bar.txt
@@ -0,0 +1 @@
+foodir/bar.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/foodir/foo.txt b/modules/caddyhttp/fileserver/testdata/foodir/foo.txt
new file mode 100644
index 00000000000..0e3335b42b6
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/foodir/foo.txt
@@ -0,0 +1 @@
+foodir/foo.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/index.php b/modules/caddyhttp/fileserver/testdata/index.php
new file mode 100644
index 00000000000..0012f7d2344
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/index.php
@@ -0,0 +1 @@
+index.php
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/large.txt b/modules/caddyhttp/fileserver/testdata/large.txt
new file mode 100644
index 00000000000..c3662374432
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/large.txt
@@ -0,0 +1,3 @@
+This is a file with more content than the other files in this directory
+such that tests using the largest_size policy pick this file, or the
+smallest_size policy avoids this file.
\ No newline at end of file
diff --git a/modules/caddyhttp/fileserver/testdata/notphp.php.txt b/modules/caddyhttp/fileserver/testdata/notphp.php.txt
new file mode 100644
index 00000000000..eba18761b0f
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/notphp.php.txt
@@ -0,0 +1 @@
+notphp.php.txt
diff --git a/modules/caddyhttp/fileserver/testdata/remote.php b/modules/caddyhttp/fileserver/testdata/remote.php
new file mode 100644
index 00000000000..78f06a2ad55
--- /dev/null
+++ b/modules/caddyhttp/fileserver/testdata/remote.php
@@ -0,0 +1 @@
+remote.php
\ No newline at end of file
diff --git "a/modules/caddyhttp/fileserver/testdata/\331\205\331\204\331\201.txt" "b/modules/caddyhttp/fileserver/testdata/\331\205\331\204\331\201.txt"
new file mode 100644
index 00000000000..9185828635d
--- /dev/null
+++ "b/modules/caddyhttp/fileserver/testdata/\331\205\331\204\331\201.txt"
@@ -0,0 +1 @@
+ملف.txt
\ No newline at end of file
diff --git a/modules/caddyhttp/headers/caddyfile.go b/modules/caddyhttp/headers/caddyfile.go
new file mode 100644
index 00000000000..f060471b100
--- /dev/null
+++ b/modules/caddyhttp/headers/caddyfile.go
@@ -0,0 +1,291 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package headers
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func init() {
+ httpcaddyfile.RegisterDirective("header", parseCaddyfile)
+ httpcaddyfile.RegisterDirective("request_header", parseReqHdrCaddyfile)
+}
+
+// parseCaddyfile sets up the handler for response headers from
+// Caddyfile tokens. Syntax:
+//
+//	header [<matcher>] [[+|-|?|>]<field> [<value>|<find>] [<replace>]] {
+//		[+]<field> [<value>|<find> [<replace>]]
+//		?<field> <default_value>
+//		-<field>
+//		><field>
+//		[defer]
+//	}
+//
+// Either a block can be opened or a single header field can be configured
+// in the first line, but not both in the same directive. Header operations
+// are deferred to write-time if any headers are being deleted or if the
+// 'defer' subdirective is used. + appends a header value, - deletes a field,
+// ? conditionally sets a value only if the header field is not already set,
+// and > sets a field with defer enabled.
+func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
+	h.Next() // consume directive name
+	matcherSet, err := h.ExtractMatcherSet()
+	if err != nil {
+		return nil, err
+	}
+	h.Next() // consume the directive name again (matcher parsing resets)
+
+	makeHandler := func() Handler {
+		return Handler{
+			Response: &RespHeaderOps{
+				HeaderOps: &HeaderOps{},
+			},
+		}
+	}
+	// two handlers: one for unconditional ops, and one for '?' (default)
+	// ops, which carry their own Require matcher and are therefore
+	// emitted as a separate route at the end
+	handler, handlerWithRequire := makeHandler(), makeHandler()
+
+	// first see if headers are in the initial line
+	var hasArgs bool
+	if h.NextArg() {
+		hasArgs = true
+		field := h.Val()
+		var value string
+		var replacement *string
+		if h.NextArg() {
+			value = h.Val()
+		}
+		if h.NextArg() {
+			arg := h.Val()
+			replacement = &arg
+		}
+		err := applyHeaderOp(
+			handler.Response.HeaderOps,
+			handler.Response,
+			field,
+			value,
+			replacement,
+		)
+		if err != nil {
+			return nil, h.Err(err.Error())
+		}
+		// deletions only work reliably at write-time
+		if len(handler.Response.HeaderOps.Delete) > 0 {
+			handler.Response.Deferred = true
+		}
+	}
+
+	// if not, they should be in a block
+	for h.NextBlock(0) {
+		field := h.Val()
+		if field == "defer" {
+			handler.Response.Deferred = true
+			continue
+		}
+		if field == "match" {
+			responseMatchers := make(map[string]caddyhttp.ResponseMatcher)
+			err := caddyhttp.ParseNamedResponseMatcher(h.NewFromNextSegment(), responseMatchers)
+			if err != nil {
+				return nil, err
+			}
+			matcher := responseMatchers["match"]
+			handler.Response.Require = &matcher
+			continue
+		}
+		if hasArgs {
+			return nil, h.Err("cannot specify headers in both arguments and block") // because it would be weird
+		}
+
+		// sometimes it is habitual for users to suffix a field name with a colon,
+		// as if they were writing a curl command or something; see
+		// https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
+		field = strings.TrimSuffix(field, ":")
+
+		var value string
+		var replacement *string
+		if h.NextArg() {
+			value = h.Val()
+		}
+		if h.NextArg() {
+			arg := h.Val()
+			replacement = &arg
+		}
+
+		// route '?' (default-value) ops to the separate handler
+		handlerToUse := handler
+		if strings.HasPrefix(field, "?") {
+			handlerToUse = handlerWithRequire
+		}
+
+		err := applyHeaderOp(
+			handlerToUse.Response.HeaderOps,
+			handlerToUse.Response,
+			field,
+			value,
+			replacement,
+		)
+		if err != nil {
+			return nil, h.Err(err.Error())
+		}
+	}
+
+	// only emit routes for handlers that were actually populated
+	var configValues []httpcaddyfile.ConfigValue
+	if !reflect.DeepEqual(handler, makeHandler()) {
+		configValues = append(configValues, h.NewRoute(matcherSet, handler)...)
+	}
+	if !reflect.DeepEqual(handlerWithRequire, makeHandler()) {
+		configValues = append(configValues, h.NewRoute(matcherSet, handlerWithRequire)...)
+	}
+
+	return configValues, nil
+}
+
+// parseReqHdrCaddyfile sets up the handler for request headers
+// from Caddyfile tokens. Syntax:
+//
+//	request_header [<matcher>] [[+|-]<field> [<value>|<find>] [<replace>]]
+func parseReqHdrCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
+	h.Next() // consume directive name
+	matcherSet, err := h.ExtractMatcherSet()
+	if err != nil {
+		return nil, err
+	}
+	h.Next() // consume the directive name again (matcher parsing resets)
+
+	configValues := []httpcaddyfile.ConfigValue{}
+
+	if !h.NextArg() {
+		return nil, h.ArgErr()
+	}
+	field := h.Val()
+
+	hdr := Handler{
+		Request: &HeaderOps{},
+	}
+
+	// sometimes it is habitual for users to suffix a field name with a colon,
+	// as if they were writing a curl command or something; see
+	// https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
+	field = strings.TrimSuffix(field, ":")
+
+	var value string
+	var replacement *string
+	if h.NextArg() {
+		value = h.Val()
+	}
+	if h.NextArg() {
+		arg := h.Val()
+		replacement = &arg
+		// a replacement consumes the final token; nothing may follow
+		if h.NextArg() {
+			return nil, h.ArgErr()
+		}
+	}
+
+	// NOTE(review): this nil check is dead code — hdr.Request is always
+	// non-nil, set by the struct literal above; kept as a safety net
+	if hdr.Request == nil {
+		hdr.Request = new(HeaderOps)
+	}
+	if err := CaddyfileHeaderOp(hdr.Request, field, value, replacement); err != nil {
+		return nil, h.Err(err.Error())
+	}
+
+	configValues = append(configValues, h.NewRoute(matcherSet, hdr)...)
+
+	if h.NextArg() {
+		return nil, h.ArgErr()
+	}
+	return configValues, nil
+}
+
+// CaddyfileHeaderOp applies a new header operation according to
+// field, value, and replacement. The field can be prefixed with
+// "+" or "-" to specify adding or removing; otherwise, the value
+// will be set (overriding any previous value). If replacement is
+// non-nil, value will be treated as a regular expression which
+// will be used to search and then replacement will be used to
+// complete the substring replacement; in that case, any + or -
+// prefix to field will be ignored.
+func CaddyfileHeaderOp(ops *HeaderOps, field, value string, replacement *string) error {
+	// passing nil for respHeaderOps signals a request-header context,
+	// in which applyHeaderOp rejects the '?' (default) modifier
+	return applyHeaderOp(ops, nil, field, value, replacement)
+}
+
+// applyHeaderOp records a single header operation in ops, selected by
+// the field's prefix: "+" append, "-" delete, "?" default-if-unset
+// (response headers only), ">" set with deferral; no prefix means set
+// (overwrite). If replacement is non-nil, the operation is a
+// find-and-replace regardless of prefix — note that case is checked
+// before the ">" case, so a ">" prefix on a replacement only enables
+// deferral. respHeaderOps is nil when operating on request headers.
+func applyHeaderOp(ops *HeaderOps, respHeaderOps *RespHeaderOps, field, value string, replacement *string) error {
+	switch {
+	case strings.HasPrefix(field, "+"): // append
+		if ops.Add == nil {
+			ops.Add = make(http.Header)
+		}
+		ops.Add.Add(field[1:], value)
+
+	case strings.HasPrefix(field, "-"): // delete
+		ops.Delete = append(ops.Delete, field[1:])
+		if respHeaderOps != nil {
+			// deletions only work reliably at write-time
+			respHeaderOps.Deferred = true
+		}
+
+	case strings.HasPrefix(field, "?"): // default (conditional on not existing) - response headers only
+		if respHeaderOps == nil {
+			return fmt.Errorf("%v: the default header modifier ('?') can only be used on response headers; for conditional manipulation of request headers, use matchers", field)
+		}
+		if respHeaderOps.Require == nil {
+			respHeaderOps.Require = &caddyhttp.ResponseMatcher{
+				Headers: make(http.Header),
+			}
+		}
+		field = strings.TrimPrefix(field, "?")
+		// NOTE(review): presumably a nil entry in Require.Headers means
+		// "field must NOT already exist", which makes the Set below
+		// conditional — confirm in caddyhttp.ResponseMatcher.Match
+		respHeaderOps.Require.Headers[field] = nil
+		if respHeaderOps.Set == nil {
+			respHeaderOps.Set = make(http.Header)
+		}
+		respHeaderOps.Set.Set(field, value)
+
+	case replacement != nil: // replace
+		// allow defer shortcut for replace syntax
+		if strings.HasPrefix(field, ">") && respHeaderOps != nil {
+			respHeaderOps.Deferred = true
+		}
+		if ops.Replace == nil {
+			ops.Replace = make(map[string][]Replacement)
+		}
+		// any op prefix is irrelevant for a replacement; strip it
+		field = strings.TrimLeft(field, "+-?>")
+		ops.Replace[field] = append(
+			ops.Replace[field],
+			Replacement{
+				SearchRegexp: value,
+				Replace: *replacement,
+			},
+		)
+
+	case strings.HasPrefix(field, ">"): // set (overwrite) with defer
+		if ops.Set == nil {
+			ops.Set = make(http.Header)
+		}
+		ops.Set.Set(field[1:], value)
+		if respHeaderOps != nil {
+			respHeaderOps.Deferred = true
+		}
+
+	default: // set (overwrite)
+		if ops.Set == nil {
+			ops.Set = make(http.Header)
+		}
+		ops.Set.Set(field, value)
+	}
+
+	return nil
+}
diff --git a/modules/caddyhttp/headers/headers.go b/modules/caddyhttp/headers/headers.go
new file mode 100644
index 00000000000..c66bd414497
--- /dev/null
+++ b/modules/caddyhttp/headers/headers.go
@@ -0,0 +1,374 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package headers
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+func init() {
+ caddy.RegisterModule(Handler{})
+}
+
+// Handler is a middleware which modifies request and response headers.
+//
+// Changes to headers are applied immediately, except for the response
+// headers when Deferred is true or when Required is set. In those cases,
+// the changes are applied when the headers are written to the response.
+// Note that deferred changes do not take effect if an error occurs later
+// in the middleware chain.
+//
+// Properties in this module accept placeholders.
+//
+// Response header operations can be conditioned upon response status code
+// and/or other header values.
+type Handler struct {
+ Request *HeaderOps `json:"request,omitempty"`
+ Response *RespHeaderOps `json:"response,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (Handler) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.headers",
+ New: func() caddy.Module { return new(Handler) },
+ }
+}
+
+// Provision sets up h's configuration.
+func (h *Handler) Provision(ctx caddy.Context) error {
+ if h.Request != nil {
+ err := h.Request.Provision(ctx)
+ if err != nil {
+ return err
+ }
+ }
+ if h.Response != nil {
+ err := h.Response.Provision(ctx)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate ensures h's configuration is valid.
+func (h Handler) Validate() error {
+ if h.Request != nil {
+ err := h.Request.validate()
+ if err != nil {
+ return err
+ }
+ }
+ if h.Response != nil {
+ err := h.Response.validate()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ServeHTTP implements caddyhttp.MiddlewareHandler. Request header
+// operations are applied immediately; response header operations are
+// applied now, or — when deferred or conditional on the response —
+// at write-time via a wrapped ResponseWriter.
+func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	if h.Request != nil {
+		h.Request.ApplyToRequest(r)
+	}
+
+	if h.Response != nil {
+		if h.Response.Deferred || h.Response.Require != nil {
+			// wrap the writer so the ops can see (and be conditioned
+			// on) the status and headers set by downstream handlers
+			w = &responseWriterWrapper{
+				ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w},
+				replacer: repl,
+				require: h.Response.Require,
+				headerOps: h.Response.HeaderOps,
+			}
+		} else {
+			h.Response.ApplyTo(w.Header(), repl)
+		}
+	}
+
+	return next.ServeHTTP(w, r)
+}
+
+// HeaderOps defines manipulations for HTTP headers.
+type HeaderOps struct {
+ // Adds HTTP headers; does not replace any existing header fields.
+ Add http.Header `json:"add,omitempty"`
+
+ // Sets HTTP headers; replaces existing header fields.
+ Set http.Header `json:"set,omitempty"`
+
+ // Names of HTTP header fields to delete. Basic wildcards are supported:
+ //
+ // - Start with `*` for all field names with the given suffix;
+ // - End with `*` for all field names with the given prefix;
+ // - Start and end with `*` for all field names containing a substring.
+ Delete []string `json:"delete,omitempty"`
+
+ // Performs in-situ substring replacements of HTTP headers.
+ // Keys are the field names on which to perform the associated replacements.
+ // If the field name is `*`, the replacements are performed on all header fields.
+ Replace map[string][]Replacement `json:"replace,omitempty"`
+}
+
+// Provision sets up the header operations. In particular, it
+// pre-compiles any regular-expression replacements so ApplyTo
+// does not have to compile them per request.
+func (ops *HeaderOps) Provision(_ caddy.Context) error {
+	for fieldName, replacements := range ops.Replace {
+		for i, r := range replacements {
+			if r.SearchRegexp == "" {
+				continue // plain substring replacement; nothing to compile
+			}
+			re, err := regexp.Compile(r.SearchRegexp)
+			if err != nil {
+				return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err)
+			}
+			// store the compiled regex back into the slice element
+			// so ApplyTo can reuse it on every request
+			replacements[i].re = re
+		}
+	}
+	return nil
+}
+
+func (ops HeaderOps) validate() error {
+ for fieldName, replacements := range ops.Replace {
+ for _, r := range replacements {
+ if r.Search != "" && r.SearchRegexp != "" {
+ return fmt.Errorf("cannot specify both a substring search and a regular expression search for field '%s'", fieldName)
+ }
+ }
+ }
+ return nil
+}
+
+// Replacement describes a string replacement,
+// either a simple and fast substring search
+// or a slower but more powerful regex search.
+type Replacement struct {
+ // The substring to search for.
+ Search string `json:"search,omitempty"`
+
+ // The regular expression to search with.
+ SearchRegexp string `json:"search_regexp,omitempty"`
+
+ // The string with which to replace matches.
+ Replace string `json:"replace,omitempty"`
+
+ re *regexp.Regexp
+}
+
+// RespHeaderOps defines manipulations for response headers.
+type RespHeaderOps struct {
+ *HeaderOps
+
+ // If set, header operations will be deferred until
+ // they are written out and only performed if the
+ // response matches these criteria.
+ Require *caddyhttp.ResponseMatcher `json:"require,omitempty"`
+
+ // If true, header operations will be deferred until
+ // they are written out. Superseded if Require is set.
+ // Usually you will need to set this to true if any
+ // fields are being deleted.
+ Deferred bool `json:"deferred,omitempty"`
+}
+
+// ApplyTo applies ops to hdr using repl.
+// Order of operations: delete-all ("*"), add, set, targeted deletes,
+// then replacements.
+func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
+	// before manipulating headers in other ways, check if there
+	// is configuration to delete all headers, and do that first
+	// because if a header is to be added, we don't want to delete
+	// it also
+	for _, fieldName := range ops.Delete {
+		fieldName = repl.ReplaceKnown(fieldName, "")
+		if fieldName == "*" {
+			clear(hdr)
+		}
+	}
+
+	// add
+	for fieldName, vals := range ops.Add {
+		fieldName = repl.ReplaceKnown(fieldName, "")
+		for _, v := range vals {
+			hdr.Add(fieldName, repl.ReplaceKnown(v, ""))
+		}
+	}
+
+	// set
+	for fieldName, vals := range ops.Set {
+		fieldName = repl.ReplaceKnown(fieldName, "")
+		var newVals []string
+		for i := range vals {
+			// append to new slice so we don't overwrite
+			// the original values in ops.Set
+			newVals = append(newVals, repl.ReplaceKnown(vals[i], ""))
+		}
+		hdr.Set(fieldName, strings.Join(newVals, ","))
+	}
+
+	// delete
+	for _, fieldName := range ops.Delete {
+		// lowered for the case-insensitive wildcard comparisons below
+		fieldName = strings.ToLower(repl.ReplaceKnown(fieldName, ""))
+		if fieldName == "*" {
+			continue // handled above
+		}
+		// a '*' at the start and/or end of the pattern does suffix,
+		// prefix, or substring matching, respectively
+		switch {
+		case strings.HasPrefix(fieldName, "*") && strings.HasSuffix(fieldName, "*"):
+			for existingField := range hdr {
+				if strings.Contains(strings.ToLower(existingField), fieldName[1:len(fieldName)-1]) {
+					delete(hdr, existingField)
+				}
+			}
+		case strings.HasPrefix(fieldName, "*"):
+			for existingField := range hdr {
+				if strings.HasSuffix(strings.ToLower(existingField), fieldName[1:]) {
+					delete(hdr, existingField)
+				}
+			}
+		case strings.HasSuffix(fieldName, "*"):
+			for existingField := range hdr {
+				if strings.HasPrefix(strings.ToLower(existingField), fieldName[:len(fieldName)-1]) {
+					delete(hdr, existingField)
+				}
+			}
+		default:
+			hdr.Del(fieldName)
+		}
+	}
+
+	// replace
+	for fieldName, replacements := range ops.Replace {
+		fieldName = http.CanonicalHeaderKey(repl.ReplaceKnown(fieldName, ""))
+
+		// all fields...
+		if fieldName == "*" {
+			for _, r := range replacements {
+				search := repl.ReplaceKnown(r.Search, "")
+				replace := repl.ReplaceKnown(r.Replace, "")
+				for fieldName, vals := range hdr { // note: shadows the outer fieldName
+					for i := range vals {
+						if r.re != nil {
+							hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace)
+						} else {
+							hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace)
+						}
+					}
+				}
+			}
+			continue
+		}
+
+		// ...or only with the named field
+		for _, r := range replacements {
+			search := repl.ReplaceKnown(r.Search, "")
+			replace := repl.ReplaceKnown(r.Replace, "")
+			for hdrFieldName, vals := range hdr {
+				// see issue #4330 for why we don't simply use hdr[fieldName]
+				if http.CanonicalHeaderKey(hdrFieldName) != fieldName {
+					continue
+				}
+				for i := range vals {
+					if r.re != nil {
+						hdr[hdrFieldName][i] = r.re.ReplaceAllString(hdr[hdrFieldName][i], replace)
+					} else {
+						hdr[hdrFieldName][i] = strings.ReplaceAll(hdr[hdrFieldName][i], search, replace)
+					}
+				}
+			}
+		}
+	}
+}
+
+// ApplyToRequest applies ops to r, specially handling the Host
+// header which the standard library does not include with the
+// header map with all the others. This method mutates r.Host.
+func (ops HeaderOps) ApplyToRequest(r *http.Request) {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	// capture the current Host header so we can
+	// reset to it when we're done
+	// (direct map access: "Host" is normally absent from the map)
+	origHost, hadHost := r.Header["Host"]
+
+	// append r.Host; this way, we know that our value
+	// was last in the list, and if an Add operation
+	// appended something else after it, that's probably
+	// fine because it's weird to have multiple Host
+	// headers anyway and presumably the one they added
+	// is the one they wanted
+	r.Header["Host"] = append(r.Header["Host"], r.Host)
+
+	// apply header operations
+	ops.ApplyTo(r.Header, repl)
+
+	// retrieve the last Host value (likely the one we appended)
+	if len(r.Header["Host"]) > 0 {
+		r.Host = r.Header["Host"][len(r.Header["Host"])-1]
+	} else {
+		// the ops deleted the Host header entirely
+		r.Host = ""
+	}
+
+	// reset the Host header slice
+	if hadHost {
+		r.Header["Host"] = origHost
+	} else {
+		delete(r.Header, "Host")
+	}
+}
+
+// responseWriterWrapper defers response header
+// operations until WriteHeader is called.
+type responseWriterWrapper struct {
+	*caddyhttp.ResponseWriterWrapper
+	replacer *caddy.Replacer
+	require *caddyhttp.ResponseMatcher
+	headerOps *HeaderOps
+	wroteHeader bool
+}
+
+// WriteHeader applies the deferred header operations (when the
+// response satisfies the require matcher, if any) just before the
+// header is written. For 1xx (informationalional) statuses, wroteHeader
+// is intentionally not set, so the operations run again when the
+// final response header is written.
+func (rww *responseWriterWrapper) WriteHeader(status int) {
+	if rww.wroteHeader {
+		return
+	}
+	// 1xx responses aren't final; just informational
+	if status < 100 || status > 199 {
+		rww.wroteHeader = true
+	}
+	if rww.require == nil || rww.require.Match(status, rww.ResponseWriterWrapper.Header()) {
+		if rww.headerOps != nil {
+			rww.headerOps.ApplyTo(rww.ResponseWriterWrapper.Header(), rww.replacer)
+		}
+	}
+	rww.ResponseWriterWrapper.WriteHeader(status)
+}
+
+// Write ensures the deferred operations run even when the handler
+// never calls WriteHeader explicitly (an implicit 200 OK).
+func (rww *responseWriterWrapper) Write(d []byte) (int, error) {
+	if !rww.wroteHeader {
+		rww.WriteHeader(http.StatusOK)
+	}
+	return rww.ResponseWriterWrapper.Write(d)
+}
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*Handler)(nil)
+ _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
+ _ http.ResponseWriter = (*responseWriterWrapper)(nil)
+)
diff --git a/modules/caddyhttp/headers/headers_test.go b/modules/caddyhttp/headers/headers_test.go
new file mode 100644
index 00000000000..9808c29c98d
--- /dev/null
+++ b/modules/caddyhttp/headers/headers_test.go
@@ -0,0 +1,274 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package headers
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// TestHandler exercises the headers Handler with table-driven cases
+// covering request header Add/Set/Delete (including wildcard patterns)
+// and Replace operations, plus response header operations that are
+// conditionally applied via Require matchers.
+func TestHandler(t *testing.T) {
+	for i, tc := range []struct {
+		handler            Handler
+		reqHeader          http.Header
+		respHeader         http.Header
+		respStatusCode     int
+		expectedReqHeader  http.Header
+		expectedRespHeader http.Header
+	}{
+		{
+			handler: Handler{
+				Request: &HeaderOps{
+					Add: http.Header{
+						"Expose-Secrets": []string{"always"},
+					},
+				},
+			},
+			reqHeader: http.Header{
+				"Expose-Secrets": []string{"i'm serious"},
+			},
+			expectedReqHeader: http.Header{
+				"Expose-Secrets": []string{"i'm serious", "always"},
+			},
+		},
+		{
+			handler: Handler{
+				Request: &HeaderOps{
+					Set: http.Header{
+						"Who-Wins": []string{"batman"},
+					},
+				},
+			},
+			reqHeader: http.Header{
+				"Who-Wins": []string{"joker"},
+			},
+			expectedReqHeader: http.Header{
+				"Who-Wins": []string{"batman"},
+			},
+		},
+		{
+			handler: Handler{
+				Request: &HeaderOps{
+					Delete: []string{"Kick-Me"},
+				},
+			},
+			reqHeader: http.Header{
+				"Kick-Me": []string{"if you can"},
+				"Keep-Me": []string{"i swear i'm innocent"},
+			},
+			expectedReqHeader: http.Header{
+				"Keep-Me": []string{"i swear i'm innocent"},
+			},
+		},
+		{
+			// wildcard deletion patterns: suffix, prefix, and substring
+			handler: Handler{
+				Request: &HeaderOps{
+					Delete: []string{
+						"*-suffix",
+						"prefix-*",
+						"*_*",
+					},
+				},
+			},
+			reqHeader: http.Header{
+				"Header-Suffix": []string{"lalala"},
+				"Prefix-Test":   []string{"asdf"},
+				"Host_Header":   []string{"silly django... sigh"}, // see issue #4830
+				"Keep-Me":       []string{"foofoofoo"},
+			},
+			expectedReqHeader: http.Header{
+				"Keep-Me": []string{"foofoofoo"},
+			},
+		},
+		{
+			// both literal and regexp replacements on the same field
+			handler: Handler{
+				Request: &HeaderOps{
+					Replace: map[string][]Replacement{
+						"Best-Server": {
+							Replacement{
+								Search:  "NGINX",
+								Replace: "the Caddy web server",
+							},
+							Replacement{
+								SearchRegexp: `Apache(\d+)`,
+								Replace:      "Caddy",
+							},
+						},
+					},
+				},
+			},
+			reqHeader: http.Header{
+				"Best-Server": []string{"it's NGINX, undoubtedly", "I love Apache2"},
+			},
+			expectedReqHeader: http.Header{
+				"Best-Server": []string{"it's the Caddy web server, undoubtedly", "I love Caddy"},
+			},
+		},
+		{
+			handler: Handler{
+				Response: &RespHeaderOps{
+					Require: &caddyhttp.ResponseMatcher{
+						Headers: http.Header{
+							"Cache-Control": nil,
+						},
+					},
+					HeaderOps: &HeaderOps{
+						Add: http.Header{
+							"Cache-Control": []string{"no-cache"},
+						},
+					},
+				},
+			},
+			respHeader: http.Header{},
+			expectedRespHeader: http.Header{
+				"Cache-Control": []string{"no-cache"},
+			},
+		},
+		{ // same as above, but checks that response headers are left alone when "Require" conditions are unmet
+			handler: Handler{
+				Response: &RespHeaderOps{
+					Require: &caddyhttp.ResponseMatcher{
+						Headers: http.Header{
+							"Cache-Control": nil,
+						},
+					},
+					HeaderOps: &HeaderOps{
+						Add: http.Header{
+							"Cache-Control": []string{"no-cache"},
+						},
+					},
+				},
+			},
+			respHeader: http.Header{
+				"Cache-Control": []string{"something"},
+			},
+			expectedRespHeader: http.Header{
+				"Cache-Control": []string{"something"},
+			},
+		},
+		{
+			handler: Handler{
+				Response: &RespHeaderOps{
+					Require: &caddyhttp.ResponseMatcher{
+						Headers: http.Header{
+							"Cache-Control": []string{"no-cache"},
+						},
+					},
+					HeaderOps: &HeaderOps{
+						Delete: []string{"Cache-Control"},
+					},
+				},
+			},
+			respHeader: http.Header{
+				"Cache-Control": []string{"no-cache"},
+			},
+			expectedRespHeader: http.Header{},
+		},
+		{
+			// status code class matching: 5 matches all 5xx responses
+			handler: Handler{
+				Response: &RespHeaderOps{
+					Require: &caddyhttp.ResponseMatcher{
+						StatusCode: []int{5},
+					},
+					HeaderOps: &HeaderOps{
+						Add: http.Header{
+							"Fail-5xx": []string{"true"},
+						},
+					},
+				},
+			},
+			respStatusCode: 503,
+			respHeader:     http.Header{},
+			expectedRespHeader: http.Header{
+				"Fail-5xx": []string{"true"},
+			},
+		},
+		{
+			// replacements match header fields case-insensitively
+			handler: Handler{
+				Request: &HeaderOps{
+					Replace: map[string][]Replacement{
+						"Case-Insensitive": {
+							Replacement{
+								Search:  "issue4330",
+								Replace: "issue #4330",
+							},
+						},
+					},
+				},
+			},
+			reqHeader: http.Header{
+				"case-insensitive": []string{"issue4330"},
+				"Other-Header":     []string{"issue4330"},
+			},
+			expectedReqHeader: http.Header{
+				"case-insensitive": []string{"issue #4330"},
+				"Other-Header":     []string{"issue4330"},
+			},
+		},
+	} {
+		rr := httptest.NewRecorder()
+
+		req := &http.Request{Header: tc.reqHeader}
+		repl := caddy.NewReplacer()
+		ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+		req = req.WithContext(ctx)
+
+		// FIX: the Provision error was previously discarded; a failed
+		// provision would make later failures confusing to diagnose
+		if err := tc.handler.Provision(caddy.Context{}); err != nil {
+			t.Errorf("Test %d: provisioning handler: %v", i, err)
+			continue
+		}
+
+		// the "next" handler emits the test case's canned response and
+		// verifies the request headers were mutated as expected
+		next := nextHandler(func(w http.ResponseWriter, r *http.Request) error {
+			for k, hdrs := range tc.respHeader {
+				for _, v := range hdrs {
+					w.Header().Add(k, v)
+				}
+			}
+
+			status := 200
+			if tc.respStatusCode != 0 {
+				status = tc.respStatusCode
+			}
+			w.WriteHeader(status)
+
+			if tc.expectedReqHeader != nil && !reflect.DeepEqual(r.Header, tc.expectedReqHeader) {
+				return fmt.Errorf("expected request header %v, got %v", tc.expectedReqHeader, r.Header)
+			}
+
+			return nil
+		})
+
+		if err := tc.handler.ServeHTTP(rr, req, next); err != nil {
+			t.Errorf("Test %d: %v", i, err)
+			continue
+		}
+
+		actual := rr.Header()
+		if tc.expectedRespHeader != nil && !reflect.DeepEqual(actual, tc.expectedRespHeader) {
+			t.Errorf("Test %d: expected response header %v, got %v", i, tc.expectedRespHeader, actual)
+			continue
+		}
+	}
+}
+
+// nextHandler adapts a plain function into a caddyhttp.Handler,
+// used as the "next" middleware in the tests above.
+type nextHandler func(http.ResponseWriter, *http.Request) error
+
+// ServeHTTP simply invokes the wrapped function.
+func (f nextHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
+	return f(w, r)
+}
diff --git a/modules/caddyhttp/http2listener.go b/modules/caddyhttp/http2listener.go
new file mode 100644
index 00000000000..51b356a7779
--- /dev/null
+++ b/modules/caddyhttp/http2listener.go
@@ -0,0 +1,102 @@
+package caddyhttp
+
+import (
+ "context"
+ "crypto/tls"
+ weakrand "math/rand"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+)
+
+// http2Listener wraps the listener to solve the following problems:
+//  1. serve h2 natively, without the h2c hack, when the listener handles
+//     the TLS connection itself but doesn't return a *tls.Conn
+//  2. graceful shutdown. The shutdown logic is copied from the stdlib
+//     http.Server; it's an extra maintenance burden, but the logic may
+//     eventually be extracted and reused for h2c graceful shutdown.
+//     http2.Server supports graceful shutdown by sending a GOAWAY frame
+//     to connected clients, but it doesn't track connection status, which
+//     would require an explicit call of http2.ConfigureServer
+type http2Listener struct {
+	cnt uint64 // number of connections currently being served by h2server
+	net.Listener
+	server *http.Server     // base server config (handler, ConnContext, ConnState hooks)
+	h2server *http2.Server  // serves the ALPN-negotiated HTTP/2 connections
+}
+
+// connectionStateConn is a net.Conn that can report its TLS
+// connection state, e.g. the ALPN-negotiated protocol.
+type connectionStateConn interface {
+	net.Conn
+	ConnectionState() tls.ConnectionState
+}
+
+// Accept returns the next connection from the wrapped listener.
+// Connections that already negotiated HTTP/2 via ALPN are handed to
+// the embedded http2.Server on their own goroutine and never returned
+// to the caller; all other connections are returned unchanged.
+func (h *http2Listener) Accept() (net.Conn, error) {
+	for {
+		conn, err := h.Listener.Accept()
+		if err != nil {
+			return nil, err
+		}
+
+		if csc, ok := conn.(connectionStateConn); ok {
+			// *tls.Conn will return empty string because it's only populated after handshake is complete
+			if csc.ConnectionState().NegotiatedProtocol == http2.NextProtoTLS {
+				go h.serveHttp2(csc)
+				continue
+			}
+		}
+
+		return conn, nil
+	}
+}
+
+// serveHttp2 serves a single HTTP/2 connection to completion, keeping
+// the active-connection count up to date (used by Shutdown) and firing
+// the server's ConnState hooks at the start and end of its lifetime.
+func (h *http2Listener) serveHttp2(csc connectionStateConn) {
+	atomic.AddUint64(&h.cnt, 1)
+	h.runHook(csc, http.StateNew)
+	defer func() {
+		csc.Close()
+		// adding ^uint64(0) (i.e. MaxUint64) wraps around, decrementing by one
+		atomic.AddUint64(&h.cnt, ^uint64(0))
+		h.runHook(csc, http.StateClosed)
+	}()
+	h.h2server.ServeConn(csc, &http2.ServeConnOpts{
+		Context:    h.server.ConnContext(context.Background(), csc),
+		BaseConfig: h.server,
+		Handler:    h.server.Handler,
+	})
+}
+
+// shutdownPollIntervalMax caps the exponential backoff used while
+// polling for open connections during Shutdown.
+const shutdownPollIntervalMax = 500 * time.Millisecond
+
+// Shutdown blocks until every HTTP/2 connection tracked by this
+// listener has closed, or until ctx is canceled (returning ctx.Err()).
+// It polls the connection counter with exponential backoff plus
+// jitter, mirroring the stdlib http.Server shutdown loop.
+func (h *http2Listener) Shutdown(ctx context.Context) error {
+	pollIntervalBase := time.Millisecond
+	nextPollInterval := func() time.Duration {
+		// Add 10% jitter.
+		//nolint:gosec
+		interval := pollIntervalBase + time.Duration(weakrand.Intn(int(pollIntervalBase/10)))
+		// Double and clamp for next time.
+		pollIntervalBase *= 2
+		if pollIntervalBase > shutdownPollIntervalMax {
+			pollIntervalBase = shutdownPollIntervalMax
+		}
+		return interval
+	}
+
+	timer := time.NewTimer(nextPollInterval())
+	defer timer.Stop()
+	for {
+		if atomic.LoadUint64(&h.cnt) == 0 {
+			return nil
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-timer.C:
+			timer.Reset(nextPollInterval())
+		}
+	}
+}
+
+// runHook invokes the base server's ConnState callback, if one is set,
+// so h2 connections participate in the same lifecycle notifications
+// as connections served by the stdlib server.
+func (h *http2Listener) runHook(conn net.Conn, state http.ConnState) {
+	if h.server.ConnState != nil {
+		h.server.ConnState(conn, state)
+	}
+}
diff --git a/modules/caddyhttp/httpredirectlistener.go b/modules/caddyhttp/httpredirectlistener.go
new file mode 100644
index 00000000000..ce9ac030875
--- /dev/null
+++ b/modules/caddyhttp/httpredirectlistener.go
@@ -0,0 +1,174 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+// init registers the listener wrapper module with Caddy.
+func init() {
+	caddy.RegisterModule(HTTPRedirectListenerWrapper{})
+}
+
+// HTTPRedirectListenerWrapper provides HTTP->HTTPS redirects for
+// connections that come on the TLS port as an HTTP request,
+// by detecting using the first few bytes that it's not a TLS
+// handshake, but instead an HTTP request.
+//
+// This is especially useful when using a non-standard HTTPS port.
+// A user may simply type the address in their browser without the
+// https:// scheme, which would cause the browser to attempt the
+// connection over HTTP, but this would cause a "Client sent an
+// HTTP request to an HTTPS server" error response.
+//
+// This listener wrapper must be placed BEFORE the "tls" listener
+// wrapper, for it to work properly.
+type HTTPRedirectListenerWrapper struct {
+	// MaxHeaderBytes is the maximum size to parse from a client's
+	// HTTP request headers. Default: 1 MB
+	// (Requests whose headers exceed this limit fail to parse and
+	// the connection is closed rather than redirected.)
+	MaxHeaderBytes int64 `json:"max_header_bytes,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (HTTPRedirectListenerWrapper) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "caddy.listeners.http_redirect",
+		New: func() caddy.Module { return new(HTTPRedirectListenerWrapper) },
+	}
+}
+
+// UnmarshalCaddyfile sets up the wrapper from Caddyfile tokens.
+// This wrapper takes no arguments, so there is nothing to parse.
+func (h *HTTPRedirectListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	return nil
+}
+
+// WrapListener wraps l so that accepted connections can detect and
+// redirect plaintext HTTP requests made to this (TLS) port.
+func (h *HTTPRedirectListenerWrapper) WrapListener(l net.Listener) net.Listener {
+	return &httpRedirectListener{l, h.MaxHeaderBytes}
+}
+
+// httpRedirectListener is a listener that checks the first few bytes
+// of the request when the server is intended to accept HTTPS requests,
+// to respond to an HTTP request with a redirect.
+type httpRedirectListener struct {
+	net.Listener
+	maxHeaderBytes int64 // read limit when parsing a suspected HTTP request; 0 means use the 1 MB default
+}
+
+// Accept waits for and returns the next connection to the listener,
+// wrapping it with a httpRedirectConn.
+func (l *httpRedirectListener) Accept() (net.Conn, error) {
+	c, err := l.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	// fall back to the documented 1 MB default when unconfigured
+	maxHeaderBytes := l.maxHeaderBytes
+	if maxHeaderBytes == 0 {
+		maxHeaderBytes = 1024 * 1024
+	}
+
+	return &httpRedirectConn{
+		Conn:  c,
+		limit: maxHeaderBytes,
+		r:     bufio.NewReader(c),
+	}, nil
+}
+
+// httpRedirectConn sniffs the first bytes read from the connection
+// to detect a plaintext HTTP request arriving on a TLS port.
+type httpRedirectConn struct {
+	net.Conn
+	once bool        // whether the first-read sniffing has already run
+	limit int64      // max bytes to consume while parsing a suspected HTTP request
+	r *bufio.Reader  // buffered reader so the first bytes can be peeked
+}
+
+// Read tries to peek at the first few bytes of the request, and if we get
+// an error reading the headers, and that error was due to the bytes looking
+// like an HTTP request, then we perform a HTTP->HTTPS redirect on the same
+// port as the original connection. After the first call, Read behaves like
+// a plain buffered read.
+func (c *httpRedirectConn) Read(p []byte) (int, error) {
+	if c.once {
+		return c.r.Read(p)
+	}
+	// no need to use sync.Once - net.Conn is not read from concurrently.
+	c.once = true
+
+	firstBytes, err := c.r.Peek(5)
+	if err != nil {
+		return 0, err
+	}
+
+	// If the request doesn't look like HTTP, then it's probably
+	// TLS bytes, and we don't need to do anything.
+	if !firstBytesLookLikeHTTP(firstBytes) {
+		return c.r.Read(p)
+	}
+
+	// From now on, we can be almost certain the request is HTTP.
+	// The returned error will be non nil and caller are expected to
+	// close the connection.
+
+	// Set the read limit, io.MultiReader is needed because
+	// when resetting, *bufio.Reader discards buffered data.
+	buffered, _ := c.r.Peek(c.r.Buffered())
+	mr := io.MultiReader(bytes.NewReader(buffered), c.Conn)
+	c.r.Reset(io.LimitReader(mr, c.limit))
+
+	// Parse the HTTP request, so we can get the Host and URL to redirect to.
+	req, err := http.ReadRequest(c.r)
+	if err != nil {
+		// FIX: include the underlying error so the cause of the
+		// parse failure isn't discarded
+		return 0, fmt.Errorf("couldn't read HTTP request: %v", err)
+	}
+
+	// Build the redirect response, using the same Host and URL,
+	// but replacing the scheme with https.
+	headers := make(http.Header)
+	headers.Add("Location", "https://"+req.Host+req.URL.String())
+	resp := &http.Response{
+		Proto:      "HTTP/1.0",
+		Status:     "308 Permanent Redirect",
+		StatusCode: 308,
+		ProtoMajor: 1,
+		ProtoMinor: 0,
+		Header:     headers,
+	}
+
+	err = resp.Write(c.Conn)
+	if err != nil {
+		// FIX: include the underlying error for the same reason as above
+		return 0, fmt.Errorf("couldn't write HTTP->HTTPS redirect: %v", err)
+	}
+
+	return 0, fmt.Errorf("redirected HTTP request on HTTPS port")
+}
+
+// firstBytesLookLikeHTTP reports whether the first five bytes read
+// from a connection look like the start of a plaintext HTTP request
+// line (i.e. a misdirected HTTP request on an HTTPS port) rather
+// than a TLS handshake. Only a handful of common methods are
+// recognized, matched by their first five characters.
+func firstBytesLookLikeHTTP(hdr []byte) bool {
+	switch string(hdr[:5]) {
+	case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
+		return true
+	}
+	return false
+}
+
+// Interface guards
+var (
+	_ caddy.ListenerWrapper = (*HTTPRedirectListenerWrapper)(nil)
+	_ caddyfile.Unmarshaler = (*HTTPRedirectListenerWrapper)(nil)
+)
diff --git a/modules/caddyhttp/intercept/intercept.go b/modules/caddyhttp/intercept/intercept.go
new file mode 100644
index 00000000000..29889dcc0ed
--- /dev/null
+++ b/modules/caddyhttp/intercept/intercept.go
@@ -0,0 +1,352 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package intercept
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// init registers the module and its Caddyfile directive.
+func init() {
+	caddy.RegisterModule(Intercept{})
+	httpcaddyfile.RegisterHandlerDirective("intercept", parseCaddyfile)
+}
+
+// Intercept is a middleware that intercepts then replaces or modifies the original response.
+// It can, for instance, be used to implement X-Sendfile/X-Accel-Redirect-like features
+// when using modules like FrankenPHP or Caddy Snake.
+//
+// EXPERIMENTAL: Subject to change or removal.
+type Intercept struct {
+	// List of handlers and their associated matchers to evaluate
+	// after successful response generation.
+	// The first handler that matches the original response will
+	// be invoked. The original response body will not be
+	// written to the client;
+	// it is up to the handler to finish handling the response.
+	//
+	// Three new placeholders are available in this handler chain:
+	// - `{http.intercept.status_code}` The status code from the response
+	// - `{http.intercept.header.*}` The headers from the response
+	HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
+
+	// Holds the named response matchers from the Caddyfile while adapting
+	responseMatchers map[string]caddyhttp.ResponseMatcher
+
+	// Holds the handle_response Caddyfile tokens while adapting
+	handleResponseSegments []*caddyfile.Dispenser
+
+	logger *zap.Logger // set during Provision
+}
+
+// CaddyModule returns the Caddy module information.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (Intercept) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "http.handlers.intercept",
+		New: func() caddy.Module { return new(Intercept) },
+	}
+}
+
+// Provision ensures that irh is set up properly before use:
+// it provisions each configured response handler and acquires
+// the module logger.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (irh *Intercept) Provision(ctx caddy.Context) error {
+	// set up any response routes
+	for i, rh := range irh.HandleResponse {
+		err := rh.Provision(ctx)
+		if err != nil {
+			return fmt.Errorf("provisioning response handler %d: %w", i, err)
+		}
+	}
+
+	irh.logger = ctx.Logger()
+
+	return nil
+}
+
+// bufPool reuses buffers that record intercepted response bodies,
+// avoiding a fresh allocation per request.
+var bufPool = sync.Pool{
+	New: func() any {
+		return new(bytes.Buffer)
+	},
+}
+
+// interceptedResponseHandler records the response and can substitute
+// a configured replacement status code when one was selected.
+//
+// TODO: handle status code replacement
+//
+// EXPERIMENTAL: Subject to change or removal.
+type interceptedResponseHandler struct {
+	caddyhttp.ResponseRecorder
+	replacer *caddy.Replacer             // placeholder evaluation for this request
+	handler caddyhttp.ResponseHandler    // the matched response handler
+	handlerIndex int                     // index of the matched handler (for logging)
+	statusCode int                       // replacement status code; 0 = keep original
+}
+
+// WriteHeader writes the configured replacement status code, if any,
+// in place of statusCode — except for 1xx informational responses
+// (statusCode in 100..199), which pass through unchanged.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (irh interceptedResponseHandler) WriteHeader(statusCode int) {
+	if irh.statusCode != 0 && (statusCode < 100 || statusCode >= 200) {
+		irh.ResponseRecorder.WriteHeader(irh.statusCode)
+
+		return
+	}
+
+	irh.ResponseRecorder.WriteHeader(statusCode)
+}
+
+// ServeHTTP records the downstream response and, when a configured
+// response handler matches it, sets placeholders from the original
+// response and runs that handler's routes instead of streaming the
+// recorded body.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (ir Intercept) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+	rec := interceptedResponseHandler{replacer: repl}
+	// the shouldBuffer callback decides, per response, whether to record
+	// (buffer) the body for a matched handler or stream it through
+	rec.ResponseRecorder = caddyhttp.NewResponseRecorder(w, buf, func(status int, header http.Header) bool {
+		// see if any response handler is configured for this original response
+		for i, rh := range ir.HandleResponse {
+			if rh.Match != nil && !rh.Match.Match(status, header) {
+				continue
+			}
+			rec.handler = rh
+			rec.handlerIndex = i
+
+			// if configured to only change the status code,
+			// do that then stream
+			if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
+				sc, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
+				if err != nil {
+					rec.statusCode = http.StatusInternalServerError
+				} else {
+					rec.statusCode = sc
+				}
+			}
+
+			// buffer only when no status-code replacement was configured
+			return rec.statusCode == 0
+		}
+
+		return false
+	})
+
+	if err := next.ServeHTTP(rec, r); err != nil {
+		return err
+	}
+	// nothing was buffered, so the response was already streamed as-is
+	if !rec.Buffered() {
+		return nil
+	}
+
+	// set up the replacer so that parts of the original response can be
+	// used for routing decisions
+	for field, value := range rec.Header() {
+		repl.Set("http.intercept.header."+field, strings.Join(value, ","))
+	}
+	repl.Set("http.intercept.status_code", rec.Status())
+
+	if c := ir.logger.Check(zapcore.DebugLevel, "handling response"); c != nil {
+		c.Write(zap.Int("handler", rec.handlerIndex))
+	}
+
+	// pass the request through the response handler routes
+	return rec.handler.Routes.Compile(next).ServeHTTP(w, r)
+}
+
+// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
+//
+//	intercept [<matcher>] {
+//	    # intercept original responses
+//	    @name {
+//	        status <code...>
+//	        header <field> [<value>]
+//	    }
+//	    replace_status [@name] <code>
+//	    handle_response [@name] {
+//	        <directives...>
+//	    }
+//	}
+//
+// The FinalizeUnmarshalCaddyfile method should be called after this
+// to finalize parsing of "handle_response" blocks, if possible.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (i *Intercept) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// collect the response matchers defined as subdirectives
+	// prefixed with "@" for use with "handle_response" blocks
+	i.responseMatchers = make(map[string]caddyhttp.ResponseMatcher)
+
+	d.Next() // consume the directive name
+	for d.NextBlock(0) {
+		// if the subdirective has an "@" prefix then we
+		// parse it as a response matcher for use with "handle_response"
+		if strings.HasPrefix(d.Val(), matcherPrefix) {
+			err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), i.responseMatchers)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch d.Val() {
+		case "handle_response":
+			// delegate the parsing of handle_response to the caller,
+			// since we need the httpcaddyfile.Helper to parse subroutes.
+			// See h.FinalizeUnmarshalCaddyfile
+			i.handleResponseSegments = append(i.handleResponseSegments, d.NewFromNextSegment())
+
+		case "replace_status":
+			args := d.RemainingArgs()
+			if len(args) != 1 && len(args) != 2 {
+				return d.Errf("must have one or two arguments: an optional response matcher, and a status code")
+			}
+
+			responseHandler := caddyhttp.ResponseHandler{}
+
+			// with two args, the first must name a previously-defined matcher
+			if len(args) == 2 {
+				if !strings.HasPrefix(args[0], matcherPrefix) {
+					return d.Errf("must use a named response matcher, starting with '@'")
+				}
+				foundMatcher, ok := i.responseMatchers[args[0]]
+				if !ok {
+					return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
+				}
+				responseHandler.Match = &foundMatcher
+				responseHandler.StatusCode = caddyhttp.WeakString(args[1])
+			} else if len(args) == 1 {
+				responseHandler.StatusCode = caddyhttp.WeakString(args[0])
+			}
+
+			// make sure there's no block, cause it doesn't make sense
+			if nesting := d.Nesting(); d.NextBlock(nesting) {
+				return d.Errf("cannot define routes for 'replace_status', use 'handle_response' instead.")
+			}
+
+			i.HandleResponse = append(
+				i.HandleResponse,
+				responseHandler,
+			)
+
+		default:
+			return d.Errf("unrecognized subdirective %s", d.Val())
+		}
+	}
+
+	return nil
+}
+
+// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
+// requires having an httpcaddyfile.Helper to function, to parse subroutes.
+// It converts each deferred "handle_response" segment into a
+// ResponseHandler, then orders handlers so those with matchers are
+// evaluated before catch-alls.
+//
+// EXPERIMENTAL: Subject to change or removal.
+func (i *Intercept) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
+	for _, d := range i.handleResponseSegments {
+		// consume the "handle_response" token
+		d.Next()
+		args := d.RemainingArgs()
+
+		// TODO: Remove this check at some point in the future
+		if len(args) == 2 {
+			return d.Errf("configuring 'handle_response' for status code replacement is no longer supported. Use 'replace_status' instead.")
+		}
+
+		if len(args) > 1 {
+			return d.Errf("too many arguments for 'handle_response': %s", args)
+		}
+
+		var matcher *caddyhttp.ResponseMatcher
+		if len(args) == 1 {
+			// the first arg should always be a matcher.
+			if !strings.HasPrefix(args[0], matcherPrefix) {
+				return d.Errf("must use a named response matcher, starting with '@'")
+			}
+
+			foundMatcher, ok := i.responseMatchers[args[0]]
+			if !ok {
+				return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
+			}
+			matcher = &foundMatcher
+		}
+
+		// parse the block as routes
+		handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
+		if err != nil {
+			return err
+		}
+		subroute, ok := handler.(*caddyhttp.Subroute)
+		if !ok {
+			return helper.Errf("segment was not parsed as a subroute")
+		}
+		i.HandleResponse = append(
+			i.HandleResponse,
+			caddyhttp.ResponseHandler{
+				Match:  matcher,
+				Routes: subroute.Routes,
+			},
+		)
+	}
+
+	// move the handle_response entries without a matcher to the end.
+	// we can't use sort.SliceStable because it will reorder the rest of the
+	// entries which may be undesirable because we don't have a good
+	// heuristic to use for sorting.
+	withoutMatchers := []caddyhttp.ResponseHandler{}
+	withMatchers := []caddyhttp.ResponseHandler{}
+	for _, hr := range i.HandleResponse {
+		if hr.Match == nil {
+			withoutMatchers = append(withoutMatchers, hr)
+		} else {
+			withMatchers = append(withMatchers, hr)
+		}
+	}
+	i.HandleResponse = append(withMatchers, withoutMatchers...)
+
+	// clean up the bits we only needed for adapting
+	i.handleResponseSegments = nil
+	i.responseMatchers = nil
+
+	return nil
+}
+
+// matcherPrefix is the sigil marking a named response matcher.
+const matcherPrefix = "@"
+
+// parseCaddyfile instantiates an Intercept handler from Caddyfile
+// tokens, running both parsing phases in sequence.
+func parseCaddyfile(helper httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+	var ir Intercept
+	if err := ir.UnmarshalCaddyfile(helper.Dispenser); err != nil {
+		return nil, err
+	}
+
+	if err := ir.FinalizeUnmarshalCaddyfile(helper); err != nil {
+		return nil, err
+	}
+
+	return ir, nil
+}
+
+// Interface guards: compile-time assertions that Intercept
+// satisfies the interfaces Caddy expects of it.
+var (
+	_ caddy.Provisioner           = (*Intercept)(nil)
+	_ caddyfile.Unmarshaler       = (*Intercept)(nil)
+	_ caddyhttp.MiddlewareHandler = (*Intercept)(nil)
+)
diff --git a/modules/caddyhttp/invoke.go b/modules/caddyhttp/invoke.go
new file mode 100644
index 00000000000..97fd1cc31e2
--- /dev/null
+++ b/modules/caddyhttp/invoke.go
@@ -0,0 +1,56 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// init registers the invoke handler module with Caddy.
+func init() {
+	caddy.RegisterModule(Invoke{})
+}
+
+// Invoke implements a handler that compiles and executes a
+// named route that was defined on the server.
+//
+// EXPERIMENTAL: Subject to change or removal.
+type Invoke struct {
+	// Name is the key of the named route to execute
+	// (looked up in the server's NamedRoutes at request time)
+	Name string `json:"name,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (Invoke) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		ID:  "http.handlers.invoke",
+		New: func() caddy.Module { return new(Invoke) },
+	}
+}
+
+// ServeHTTP looks up the named route on the server from the request
+// context and executes it, chaining to next; it errors if no route
+// with that name exists.
+func (invoke *Invoke) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
+	server := r.Context().Value(ServerCtxKey).(*Server)
+	if route, ok := server.NamedRoutes[invoke.Name]; ok {
+		return route.Compile(next).ServeHTTP(w, r)
+	}
+	return fmt.Errorf("invoke: route '%s' not found", invoke.Name)
+}
+
+// Interface guards: compile-time assertion that Invoke is a
+// middleware handler.
+var (
+	_ MiddlewareHandler = (*Invoke)(nil)
+)
diff --git a/modules/caddyhttp/ip_matchers.go b/modules/caddyhttp/ip_matchers.go
new file mode 100644
index 00000000000..5e0b356e7c8
--- /dev/null
+++ b/modules/caddyhttp/ip_matchers.go
@@ -0,0 +1,366 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/netip"
+ "reflect"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types/ref"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/internal"
+)
+
// MatchRemoteIP matches requests by the remote IP address,
// i.e. the IP address of the direct connection to Caddy.
type MatchRemoteIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones are parallel slices: they must always have
	// the same length so that zones[i] holds the zone filter (or
	// "" for none) belonging to cidrs[i] when matching later
	cidrs  []*netip.Prefix
	zones  []string
	logger *zap.Logger
}

// MatchClientIP matches requests by the client IP address,
// i.e. the resolved address, considering trusted proxies.
type MatchClientIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones are parallel slices: they must always have
	// the same length so that zones[i] holds the zone filter (or
	// "" for none) belonging to cidrs[i] when matching later
	cidrs  []*netip.Prefix
	zones  []string
	logger *zap.Logger
}

func init() {
	caddy.RegisterModule(MatchRemoteIP{})
	caddy.RegisterModule(MatchClientIP{})
}
+
+// CaddyModule returns the Caddy module information.
+func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.matchers.remote_ip",
+ New: func() caddy.Module { return new(MatchRemoteIP) },
+ }
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ for d.NextArg() {
+ if d.Val() == "forwarded" {
+ return d.Err("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
+ }
+ if d.Val() == "private_ranges" {
+ m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
+ continue
+ }
+ m.Ranges = append(m.Ranges, d.Val())
+ }
+ if d.NextBlock(0) {
+ return d.Err("malformed remote_ip matcher: blocks are not supported")
+ }
+ }
+ return nil
+}
+
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression remote_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"remote_ip",
		// name of the function that the macro will be rewritten to call.
		"remote_ip_match_request_list",
		// internal data type accepted by the matcher: a list of strings.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchRemoteIP instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := reflect.TypeOf([]string{})
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}

			m := MatchRemoteIP{}

			for _, input := range strList.([]string) {
				// keep parity with UnmarshalCaddyfile: 'forwarded' was removed
				if input == "forwarded" {
					return nil, errors.New("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
				}
				m.Ranges = append(m.Ranges, input)
			}

			// parse the ranges and attach a logger now, at config load time
			err = m.Provision(ctx)
			return m, err
		},
	)
}
+
+// Provision parses m's IP ranges, either from IP or CIDR expressions.
+func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
+ m.logger = ctx.Logger()
+ cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
+ if err != nil {
+ return err
+ }
+ m.cidrs = cidrs
+ m.zones = zones
+
+ return nil
+}
+
// Match returns true if r matches m. It delegates to MatchWithError;
// because this signature cannot return an error, any error is stashed
// in the request's vars under MatcherErrorVarKey instead.
func (m MatchRemoteIP) Match(r *http.Request) bool {
	match, err := m.MatchWithError(r)
	if err != nil {
		SetVar(r.Context(), MatcherErrorVarKey, err)
	}
	return match
}
+
+// MatchWithError returns true if r matches m.
+func (m MatchRemoteIP) MatchWithError(r *http.Request) (bool, error) {
+ // if handshake is not finished, we infer 0-RTT that has
+ // not verified remote IP; could be spoofed, so we throw
+ // HTTP 425 status to tell the client to try again after
+ // the handshake is complete
+ if r.TLS != nil && !r.TLS.HandshakeComplete {
+ return false, Error(http.StatusTooEarly, fmt.Errorf("TLS handshake not complete, remote IP cannot be verified"))
+ }
+
+ address := r.RemoteAddr
+ clientIP, zoneID, err := parseIPZoneFromString(address)
+ if err != nil {
+ if c := m.logger.Check(zapcore.ErrorLevel, "getting remote "); c != nil {
+ c.Write(zap.Error(err))
+ }
+
+ return false, nil
+ }
+ matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
+ if !matches && !zoneFilter {
+ if c := m.logger.Check(zapcore.DebugLevel, "zone ID from remote IP did not match"); c != nil {
+ c.Write(zap.String("zone", zoneID))
+ }
+ }
+ return matches, nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchClientIP) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.matchers.client_ip",
+ New: func() caddy.Module { return new(MatchClientIP) },
+ }
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchClientIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ for d.NextArg() {
+ if d.Val() == "private_ranges" {
+ m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
+ continue
+ }
+ m.Ranges = append(m.Ranges, d.Val())
+ }
+ if d.NextBlock(0) {
+ return d.Err("malformed client_ip matcher: blocks are not supported")
+ }
+ }
+ return nil
+}
+
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression client_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchClientIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"client_ip",
		// name of the function that the macro will be rewritten to call.
		"client_ip_match_request_list",
		// internal data type accepted by the matcher: a list of strings.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchClientIP instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := reflect.TypeOf([]string{})
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}

			m := MatchClientIP{
				Ranges: strList.([]string),
			}

			// parse the ranges and attach a logger now, at config load time
			err = m.Provision(ctx)
			return m, err
		},
	)
}
+
+// Provision parses m's IP ranges, either from IP or CIDR expressions.
+func (m *MatchClientIP) Provision(ctx caddy.Context) error {
+ m.logger = ctx.Logger()
+ cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
+ if err != nil {
+ return err
+ }
+ m.cidrs = cidrs
+ m.zones = zones
+ return nil
+}
+
// Match returns true if r matches m. It delegates to MatchWithError;
// because this signature cannot return an error, any error is stashed
// in the request's vars under MatcherErrorVarKey instead.
func (m MatchClientIP) Match(r *http.Request) bool {
	match, err := m.MatchWithError(r)
	if err != nil {
		SetVar(r.Context(), MatcherErrorVarKey, err)
	}
	return match
}
+
+// MatchWithError returns true if r matches m.
+func (m MatchClientIP) MatchWithError(r *http.Request) (bool, error) {
+ // if handshake is not finished, we infer 0-RTT that has
+ // not verified remote IP; could be spoofed, so we throw
+ // HTTP 425 status to tell the client to try again after
+ // the handshake is complete
+ if r.TLS != nil && !r.TLS.HandshakeComplete {
+ return false, Error(http.StatusTooEarly, fmt.Errorf("TLS handshake not complete, remote IP cannot be verified"))
+ }
+
+ address := GetVar(r.Context(), ClientIPVarKey).(string)
+ clientIP, zoneID, err := parseIPZoneFromString(address)
+ if err != nil {
+ m.logger.Error("getting client IP", zap.Error(err))
+ return false, nil
+ }
+ matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
+ if !matches && !zoneFilter {
+ m.logger.Debug("zone ID from client IP did not match", zap.String("zone", zoneID))
+ }
+ return matches, nil
+}
+
+func provisionCidrsZonesFromRanges(ranges []string) ([]*netip.Prefix, []string, error) {
+ cidrs := []*netip.Prefix{}
+ zones := []string{}
+ repl := caddy.NewReplacer()
+ for _, str := range ranges {
+ str = repl.ReplaceAll(str, "")
+ // Exclude the zone_id from the IP
+ if strings.Contains(str, "%") {
+ split := strings.Split(str, "%")
+ str = split[0]
+ // write zone identifiers in m.zones for matching later
+ zones = append(zones, split[1])
+ } else {
+ zones = append(zones, "")
+ }
+ if strings.Contains(str, "/") {
+ ipNet, err := netip.ParsePrefix(str)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing CIDR expression '%s': %v", str, err)
+ }
+ cidrs = append(cidrs, &ipNet)
+ } else {
+ ipAddr, err := netip.ParseAddr(str)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid IP address: '%s': %v", str, err)
+ }
+ ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
+ cidrs = append(cidrs, &ipNew)
+ }
+ }
+ return cidrs, zones, nil
+}
+
// parseIPZoneFromString extracts the IP address and (optional) IPv6
// zone identifier from a host, host:port, or host%zone string. On
// parse failure it returns the unspecified IPv4 address and an error.
func parseIPZoneFromString(address string) (netip.Addr, string, error) {
	// strip a port if one is present; otherwise use the whole string
	ipStr, _, err := net.SplitHostPort(address)
	if err != nil {
		ipStr = address // OK; probably didn't have a port
	}

	// Some IPv6-Addresses can contain zone identifiers at the end,
	// which are separated with "%"
	var zoneID string
	if strings.Contains(ipStr, "%") {
		parts := strings.Split(ipStr, "%")
		ipStr, zoneID = parts[0], parts[1]
	}

	ipAddr, err := netip.ParseAddr(ipStr)
	if err != nil {
		return netip.IPv4Unspecified(), "", err
	}

	return ipAddr, zoneID, nil
}
+
+func matchIPByCidrZones(clientIP netip.Addr, zoneID string, cidrs []*netip.Prefix, zones []string) (bool, bool) {
+ zoneFilter := true
+ for i, ipRange := range cidrs {
+ if ipRange.Contains(clientIP) {
+ // Check if there are zone filters assigned and if they match.
+ if zones[i] == "" || zoneID == zones[i] {
+ return true, false
+ }
+ zoneFilter = false
+ }
+ }
+ return false, zoneFilter
+}
+
// Interface guards: compile-time assertions that both matchers
// implement all the interfaces the HTTP app expects of them.
var (
	_ RequestMatcherWithError = (*MatchRemoteIP)(nil)
	_ caddy.Provisioner       = (*MatchRemoteIP)(nil)
	_ caddyfile.Unmarshaler   = (*MatchRemoteIP)(nil)
	_ CELLibraryProducer      = (*MatchRemoteIP)(nil)

	_ RequestMatcherWithError = (*MatchClientIP)(nil)
	_ caddy.Provisioner       = (*MatchClientIP)(nil)
	_ caddyfile.Unmarshaler   = (*MatchClientIP)(nil)
	_ CELLibraryProducer      = (*MatchClientIP)(nil)
)
diff --git a/modules/caddyhttp/ip_range.go b/modules/caddyhttp/ip_range.go
new file mode 100644
index 00000000000..bfd76c14c3d
--- /dev/null
+++ b/modules/caddyhttp/ip_range.go
@@ -0,0 +1,137 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "fmt"
+ "net/http"
+ "net/netip"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/internal"
+)
+
func init() {
	caddy.RegisterModule(StaticIPRange{})
}

// IPRangeSource gets a list of IP ranges.
//
// The request is passed as an argument to allow plugin implementations
// to have more flexibility. But, a plugin MUST NOT modify the request.
// The caller will have read the `r.RemoteAddr` before getting IP ranges.
//
// This should be a very fast function -- instant if possible.
// The list of IP ranges should be sourced as soon as possible if loaded
// from an external source (i.e. initially loaded during Provisioning),
// so that it's ready to be used when requests start getting handled.
// A read lock should probably be used to get the cached value if the
// ranges can change at runtime (e.g. periodically refreshed).
// Using a `caddy.UsagePool` may be a good idea to avoid having refetch
// the values when a config reload occurs, which would waste time.
//
// If the list of IP ranges cannot be sourced, then provisioning SHOULD
// fail. Getting the IP ranges at runtime MUST NOT fail, because it would
// cancel incoming requests. If refreshing the list fails, then the
// previous list of IP ranges should continue to be returned so that the
// server can continue to operate normally.
type IPRangeSource interface {
	GetIPRanges(*http.Request) []netip.Prefix
}

// StaticIPRange provides a static range of IP address prefixes (CIDRs).
type StaticIPRange struct {
	// A static list of IP ranges (supports CIDR notation).
	Ranges []string `json:"ranges,omitempty"`

	// Holds the parsed CIDR ranges from Ranges; filled in
	// during Provision so requests never re-parse strings.
	ranges []netip.Prefix
}
+
+// CaddyModule returns the Caddy module information.
+func (StaticIPRange) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.ip_sources.static",
+ New: func() caddy.Module { return new(StaticIPRange) },
+ }
+}
+
+func (s *StaticIPRange) Provision(ctx caddy.Context) error {
+ for _, str := range s.Ranges {
+ prefix, err := CIDRExpressionToPrefix(str)
+ if err != nil {
+ return err
+ }
+ s.ranges = append(s.ranges, prefix)
+ }
+
+ return nil
+}
+
// GetIPRanges returns the prefixes parsed during Provision.
// The request argument is unused because this source is static.
// Implements IPRangeSource.
func (s *StaticIPRange) GetIPRanges(_ *http.Request) []netip.Prefix {
	return s.ranges
}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *StaticIPRange) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ if !d.Next() {
+ return nil
+ }
+ for d.NextArg() {
+ if d.Val() == "private_ranges" {
+ m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
+ continue
+ }
+ m.Ranges = append(m.Ranges, d.Val())
+ }
+ return nil
+}
+
// CIDRExpressionToPrefix takes a string which could be either a
// CIDR expression or a single IP address, and returns a netip.Prefix.
// A bare address is converted to a single-address prefix (/32 or /128).
func CIDRExpressionToPrefix(expr string) (netip.Prefix, error) {
	// No slash means it should be a single IP address
	if !strings.Contains(expr, "/") {
		parsed, err := netip.ParseAddr(expr)
		if err != nil {
			return netip.Prefix{}, fmt.Errorf("invalid IP address: '%s': %v", expr, err)
		}
		return netip.PrefixFrom(parsed, parsed.BitLen()), nil
	}

	// Otherwise, parse it as a CIDR expression
	prefix, err := netip.ParsePrefix(expr)
	if err != nil {
		return netip.Prefix{}, fmt.Errorf("parsing CIDR expression: '%s': %v", expr, err)
	}
	return prefix, nil
}
+
// Interface guards
var (
	_ caddy.Provisioner     = (*StaticIPRange)(nil)
	_ caddyfile.Unmarshaler = (*StaticIPRange)(nil)
	_ IPRangeSource         = (*StaticIPRange)(nil)
)

// PrivateRangesCIDR returns a list of private CIDR range
// strings, which can be used as a configuration shortcut.
// Note: this function is used at least by mholt/caddy-l4, so
// it must stay exported here even though it only delegates
// to the internal package.
func PrivateRangesCIDR() []string {
	return internal.PrivateRangesCIDR()
}
diff --git a/modules/caddyhttp/logging.go b/modules/caddyhttp/logging.go
new file mode 100644
index 00000000000..87298ac3c6f
--- /dev/null
+++ b/modules/caddyhttp/logging.go
@@ -0,0 +1,256 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "encoding/json"
+ "errors"
+ "net"
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the logger_names map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to one or more custom logger
	// names. For example, a mapping of `"example.com": ["example"]` would
	// cause access logs from requests with a Host of example.com to be
	// emitted by a logger named "http.log.access.example". If there are
	// multiple logger names, then the log will be emitted to all of them.
	// If the logger name is empty, the default logger is used, i.e.
	// the logger "http.log.access".
	//
	// Keys must be hostnames (without ports), and may contain wildcards
	// to match subdomains. The value is an array of logger names.
	//
	// For backwards compatibility, if the value is a string, it is treated
	// as a single-element array.
	LoggerNames map[string]StringArray `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// logger_names map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`

	// If true, credentials that are otherwise omitted, will be logged.
	// The definition of credentials is defined by https://fetch.spec.whatwg.org/#credentials,
	// and this includes some request and response headers, i.e `Cookie`,
	// `Set-Cookie`, `Authorization`, and `Proxy-Authorization`.
	ShouldLogCredentials bool `json:"should_log_credentials,omitempty"`

	// Log each individual handler that is invoked.
	// Requires that the log emit at DEBUG level.
	//
	// NOTE: This may log the configuration of your
	// HTTP handler modules; do not enable this in
	// insecure contexts when there is sensitive
	// data in the configuration.
	//
	// EXPERIMENTAL: Subject to change or removal.
	Trace bool `json:"trace,omitempty"`
}
+
// wrapLogger wraps logger in one or more loggers named
// according to user preferences for the given request: a
// per-request override (AccessLoggerNameVarKey) wins; otherwise
// the logger names configured for the request's host are used.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, req *http.Request) []*zap.Logger {
	// using the `log_name` directive or the `access_logger_names` variable,
	// the logger names can be overridden for the current request
	if names := GetVar(req.Context(), AccessLoggerNameVarKey); names != nil {
		if namesSlice, ok := names.([]any); ok {
			loggers := make([]*zap.Logger, 0, len(namesSlice))
			for _, loggerName := range namesSlice {
				// no name, use the default logger
				if loggerName == "" {
					loggers = append(loggers, logger)
					continue
				}
				// make a logger with the given name
				// NOTE(review): assumes every element is a string; a
				// non-string, non-empty element would panic here —
				// confirm upstream only ever stores strings
				loggers = append(loggers, logger.Named(loggerName.(string)))
			}
			return loggers
		}
	}

	// get the hostname from the request, with the port number stripped
	host, _, err := net.SplitHostPort(req.Host)
	if err != nil {
		host = req.Host
	}

	// get the logger names for this host from the config
	hosts := slc.getLoggerHosts(host)

	// make a list of named loggers, or the default logger
	loggers := make([]*zap.Logger, 0, len(hosts))
	for _, loggerName := range hosts {
		// no name, use the default logger
		if loggerName == "" {
			loggers = append(loggers, logger)
			continue
		}
		// make a logger with the given name
		loggers = append(loggers, logger.Named(loggerName))
	}
	return loggers
}
+
+func (slc ServerLogConfig) getLoggerHosts(host string) []string {
+ // try the exact hostname first
+ if hosts, ok := slc.LoggerNames[host]; ok {
+ return hosts
+ }
+
+ // try matching wildcard domains if other non-specific loggers exist
+ labels := strings.Split(host, ".")
+ for i := range labels {
+ if labels[i] == "" {
+ continue
+ }
+ labels[i] = "*"
+ wildcardHost := strings.Join(labels, ".")
+ if hosts, ok := slc.LoggerNames[wildcardHost]; ok {
+ return hosts
+ }
+ }
+
+ return []string{slc.DefaultLoggerName}
+}
+
+func (slc *ServerLogConfig) clone() *ServerLogConfig {
+ clone := &ServerLogConfig{
+ DefaultLoggerName: slc.DefaultLoggerName,
+ LoggerNames: make(map[string]StringArray),
+ SkipHosts: append([]string{}, slc.SkipHosts...),
+ SkipUnmappedHosts: slc.SkipUnmappedHosts,
+ ShouldLogCredentials: slc.ShouldLogCredentials,
+ }
+ for k, v := range slc.LoggerNames {
+ clone.LoggerNames[k] = append([]string{}, v...)
+ }
+ return clone
+}
+
// StringArray is a slice of strings, but also accepts a single
// string as a value when JSON unmarshaling, converting it to a
// slice of one string.
type StringArray []string

// UnmarshalJSON satisfies json.Unmarshaler. It accepts either a
// JSON string or a JSON array of strings; anything else is an error.
func (sa *StringArray) UnmarshalJSON(b []byte) error {
	var decoded any
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	switch val := decoded.(type) {
	case string:
		// single string becomes a one-element array
		*sa = StringArray{val}
		return nil
	case []any:
		out := make([]string, 0, len(val))
		for _, item := range val {
			str, ok := item.(string)
			if !ok {
				return errors.New("unsupported type")
			}
			out = append(out, str)
		}
		*sa = StringArray(out)
		return nil
	}
	return errors.New("unsupported type")
}
+
+// errLogValues inspects err and returns the status code
+// to use, the error log message, and any extra fields.
+// If err is a HandlerError, the returned values will
+// have richer information.
+func errLogValues(err error) (status int, msg string, fields func() []zapcore.Field) {
+ var handlerErr HandlerError
+ if errors.As(err, &handlerErr) {
+ status = handlerErr.StatusCode
+ if handlerErr.Err == nil {
+ msg = err.Error()
+ } else {
+ msg = handlerErr.Err.Error()
+ }
+ fields = func() []zapcore.Field {
+ return []zapcore.Field{
+ zap.Int("status", handlerErr.StatusCode),
+ zap.String("err_id", handlerErr.ID),
+ zap.String("err_trace", handlerErr.Trace),
+ }
+ }
+ return
+ }
+ fields = func() []zapcore.Field {
+ return []zapcore.Field{
+ zap.Error(err),
+ }
+ }
+ status = http.StatusInternalServerError
+ msg = err.Error()
+ return
+}
+
+// ExtraLogFields is a list of extra fields to log with every request.
+type ExtraLogFields struct {
+ fields []zapcore.Field
+}
+
+// Add adds a field to the list of extra fields to log.
+func (e *ExtraLogFields) Add(field zap.Field) {
+ e.fields = append(e.fields, field)
+}
+
+// Set sets a field in the list of extra fields to log.
+// If the field already exists, it is replaced.
+func (e *ExtraLogFields) Set(field zap.Field) {
+ for i := range e.fields {
+ if e.fields[i].Key == field.Key {
+ e.fields[i] = field
+ return
+ }
+ }
+ e.fields = append(e.fields, field)
+}
+
const (
	// LogSkipVar is the variable name used to indicate that
	// this request should be omitted from the access logs
	LogSkipVar string = "log_skip"

	// ExtraLogFieldsCtxKey is the context key under which an
	// *ExtraLogFields value is stored, for adding additional
	// fields to the access logs
	ExtraLogFieldsCtxKey caddy.CtxKey = "extra_log_fields"

	// AccessLoggerNameVarKey is the variable name used to
	// indicate the logger(s) to be used for the current request
	AccessLoggerNameVarKey string = "access_logger_names"
)
diff --git a/modules/caddyhttp/logging/caddyfile.go b/modules/caddyhttp/logging/caddyfile.go
new file mode 100644
index 00000000000..010b48919a1
--- /dev/null
+++ b/modules/caddyhttp/logging/caddyfile.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
func init() {
	httpcaddyfile.RegisterHandlerDirective("log_append", parseCaddyfile)
}

// parseCaddyfile sets up the log_append handler from Caddyfile tokens.
// Syntax (reconstructed; the two arguments are the field key and the
// value, as consumed by UnmarshalCaddyfile):
//
//	log_append [<matcher>] <key> <value>
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	handler := new(LogAppend)
	err := handler.UnmarshalCaddyfile(h.Dispenser)
	return handler, err
}
+
// UnmarshalCaddyfile implements caddyfile.Unmarshaler. It expects
// exactly two arguments after the directive name: the log field key
// and its value; a missing argument is a syntax error.
func (h *LogAppend) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name
	if !d.NextArg() {
		return d.ArgErr()
	}
	h.Key = d.Val()
	if !d.NextArg() {
		return d.ArgErr()
	}
	h.Value = d.Val()
	return nil
}

// Interface guards
var (
	_ caddyfile.Unmarshaler = (*LogAppend)(nil)
)
diff --git a/modules/caddyhttp/logging/logadd.go b/modules/caddyhttp/logging/logadd.go
new file mode 100644
index 00000000000..3b554367f93
--- /dev/null
+++ b/modules/caddyhttp/logging/logadd.go
@@ -0,0 +1,94 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
func init() {
	caddy.RegisterModule(LogAppend{})
}

// LogAppend implements a middleware that takes a key and value, where
// the key is the name of a log field and the value is a placeholder,
// or variable key, or constant value to use for that field.
type LogAppend struct {
	// Key is the name of the log field.
	Key string `json:"key,omitempty"`

	// Value is the value to use for the log field.
	// If it is a placeholder (with surrounding `{}`),
	// it will be evaluated when the log is written.
	// If the value is a key that exists in the `vars`
	// map, the value of that key will be used. Otherwise
	// the value will be used as-is as a constant string.
	Value string `json:"value,omitempty"`
}
+
+// CaddyModule returns the Caddy module information.
+func (LogAppend) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.log_append",
+ New: func() caddy.Module { return new(LogAppend) },
+ }
+}
+
+func (h LogAppend) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ // Run the next handler in the chain first.
+ // If an error occurs, we still want to add
+ // any extra log fields that we can, so we
+ // hold onto the error and return it later.
+ handlerErr := next.ServeHTTP(w, r)
+
+ // On the way back up the chain, add the extra log field
+ ctx := r.Context()
+
+ vars := ctx.Value(caddyhttp.VarsCtxKey).(map[string]any)
+ repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ extra := ctx.Value(caddyhttp.ExtraLogFieldsCtxKey).(*caddyhttp.ExtraLogFields)
+
+ var varValue any
+ if strings.HasPrefix(h.Value, "{") &&
+ strings.HasSuffix(h.Value, "}") &&
+ strings.Count(h.Value, "{") == 1 {
+ // the value looks like a placeholder, so get its value
+ varValue, _ = repl.Get(strings.Trim(h.Value, "{}"))
+ } else if val, ok := vars[h.Value]; ok {
+ // the value is a key in the vars map
+ varValue = val
+ } else {
+ // the value is a constant string
+ varValue = h.Value
+ }
+
+ // Add the field to the extra log fields.
+ // We use zap.Any because it will reflect
+ // to the correct type for us.
+ extra.Add(zap.Any(h.Key, varValue))
+
+ return handlerErr
+}
+
+// Interface guards
+var (
+ _ caddyhttp.MiddlewareHandler = (*LogAppend)(nil)
+)
diff --git a/modules/caddyhttp/map/caddyfile.go b/modules/caddyhttp/map/caddyfile.go
new file mode 100644
index 00000000000..8f7b5d34e6b
--- /dev/null
+++ b/modules/caddyhttp/map/caddyfile.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphandler
+
+import (
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
func init() {
	httpcaddyfile.RegisterHandlerDirective("map", parseCaddyfile)
}

// parseCaddyfile sets up the map handler from Caddyfile tokens. Syntax
// (reconstructed; the first argument is the source placeholder and the
// remaining arguments are the destination placeholders, per the parsing
// below):
//
//	map [<matcher>] <source> <destinations...> {
//		[~]<input> <outputs...>
//		default    <defaults...>
//	}
//
// If the input value is prefixed with a tilde (~), then the input will be parsed as a
// regular expression.
//
// The Caddyfile adapter treats outputs that are a literal hyphen (-) as a null/nil
// value. This is useful if you want to fall back to default for that particular output.
//
// The number of outputs for each mapping must not be more than the number of destinations.
// However, for convenience, there may be fewer outputs than destinations and any missing
// outputs will be filled in implicitly.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	var handler Handler

	// source
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	handler.Source = h.Val()

	// destinations
	handler.Destinations = h.RemainingArgs()
	if len(handler.Destinations) == 0 {
		return nil, h.Err("missing destination argument(s)")
	}
	for _, dest := range handler.Destinations {
		if shorthand := httpcaddyfile.WasReplacedPlaceholderShorthand(dest); shorthand != "" {
			return nil, h.Errf("destination %s conflicts with a Caddyfile placeholder shorthand", shorthand)
		}
	}

	// mappings
	for h.NextBlock(0) {
		// defaults are a special case
		if h.Val() == "default" {
			if len(handler.Defaults) > 0 {
				return nil, h.Err("defaults already defined")
			}
			handler.Defaults = h.RemainingArgs()
			// pad defaults so there is one per destination
			for len(handler.Defaults) < len(handler.Destinations) {
				handler.Defaults = append(handler.Defaults, "")
			}
			continue
		}

		// every line maps an input value to one or more outputs
		in := h.Val()
		var outs []any
		for h.NextArg() {
			val := h.ScalarVal()
			if val == "-" {
				// a literal hyphen means null/nil (fall back to default)
				outs = append(outs, nil)
			} else {
				outs = append(outs, val)
			}
		}

		// cannot have more outputs than destinations
		if len(outs) > len(handler.Destinations) {
			return nil, h.Err("too many outputs")
		}

		// for convenience, can have fewer outputs than destinations, but the
		// underlying handler won't accept that, so we fill in nil values
		for len(outs) < len(handler.Destinations) {
			outs = append(outs, nil)
		}

		// create the mapping; a leading tilde marks a regexp input
		mapping := Mapping{Outputs: outs}
		if strings.HasPrefix(in, "~") {
			mapping.InputRegexp = in[1:]
		} else {
			mapping.Input = in
		}

		handler.Mappings = append(handler.Mappings, mapping)
	}
	return handler, nil
}
diff --git a/modules/caddyhttp/map/map.go b/modules/caddyhttp/map/map.go
new file mode 100644
index 00000000000..d02085e7633
--- /dev/null
+++ b/modules/caddyhttp/map/map.go
@@ -0,0 +1,196 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphandler
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "slices"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// init registers this handler as a Caddy module at program startup.
+func init() {
+	caddy.RegisterModule(Handler{})
+}
+
+// Handler implements a middleware that maps inputs to outputs. Specifically, it
+// compares a source value against the map inputs, and for one that matches, it
+// applies the output values to each destination. Destinations become placeholder
+// names.
+//
+// Mapped placeholders are not evaluated until they are used, so even for very
+// large mappings, this handler is quite efficient.
+type Handler struct {
+	// Source is the placeholder from which to get the input value.
+	// For example: "{http.request.uri.path}".
+	Source string `json:"source,omitempty"`
+
+	// Destinations are the names of placeholders in which to store the outputs.
+	// Destination values should be wrapped in braces, for example, {my_placeholder}.
+	Destinations []string `json:"destinations,omitempty"`
+
+	// Mappings from source values (inputs) to destination values (outputs).
+	// The first matching, non-nil mapping will be applied.
+	Mappings []Mapping `json:"mappings,omitempty"`
+
+	// If no mappings match or if the mapped output is null/nil, the associated
+	// default output will be applied (optional).
+	Defaults []string `json:"defaults,omitempty"`
+}
+
+// CaddyModule returns the Caddy module information.
+func (Handler) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.handlers.map",
+		New: func() caddy.Module { return new(Handler) },
+	}
+	return info
+}
+
+// Provision sets up h.
+func (h *Handler) Provision(_ caddy.Context) error {
+ for j, dest := range h.Destinations {
+ if strings.Count(dest, "{") != 1 || !strings.HasPrefix(dest, "{") {
+ return fmt.Errorf("destination must be a placeholder and only a placeholder")
+ }
+ h.Destinations[j] = strings.Trim(dest, "{}")
+ }
+
+ for i, m := range h.Mappings {
+ if m.InputRegexp == "" {
+ continue
+ }
+ var err error
+ h.Mappings[i].re, err = regexp.Compile(m.InputRegexp)
+ if err != nil {
+ return fmt.Errorf("compiling regexp for mapping %d: %v", i, err)
+ }
+ }
+
+ // TODO: improve efficiency even further by using an actual map type
+ // for the non-regexp mappings, OR sort them and do a binary search
+
+ return nil
+}
+
+// Validate ensures that h is configured properly: defaults (if any) and each
+// mapping's outputs must correspond 1:1 with the destinations, and mapping
+// inputs must be unambiguous and unique.
+func (h *Handler) Validate() error {
+	nDest, nDef := len(h.Destinations), len(h.Defaults)
+	if nDef > 0 && nDef != nDest {
+		return fmt.Errorf("%d destinations != %d defaults", nDest, nDef)
+	}
+
+	seen := make(map[string]int)
+	for i, mapping := range h.Mappings {
+		// prevent confusing/ambiguous mappings
+		if mapping.Input != "" && mapping.InputRegexp != "" {
+			return fmt.Errorf("mapping %d has both input and input_regexp fields specified, which is confusing", i)
+		}
+
+		// prevent duplicate mappings (regexp inputs are keyed by their pattern)
+		key := mapping.Input
+		if mapping.InputRegexp != "" {
+			key = mapping.InputRegexp
+		}
+		if prev, dup := seen[key]; dup {
+			return fmt.Errorf("mapping %d has a duplicate input '%s' previously used with mapping %d", i, key, prev)
+		}
+		seen[key] = i
+
+		// ensure mappings have 1:1 output-to-destination correspondence
+		if nOut := len(mapping.Outputs); nOut != nDest {
+			return fmt.Errorf("mapping %d has %d outputs but there are %d destinations defined", i, nOut, nDest)
+		}
+	}
+
+	return nil
+}
+
+// ServeHTTP implements caddyhttp.MiddlewareHandler. It performs no lookup up
+// front; it registers a lazy callback on the request's replacer so a mapping
+// is only evaluated if/when one of the destination placeholders is used.
+func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	// defer work until a variable is actually evaluated by using replacer's Map callback
+	repl.Map(func(key string) (any, bool) {
+		// return early if the variable is not even a configured destination
+		destIdx := slices.Index(h.Destinations, key)
+		if destIdx < 0 {
+			return nil, false
+		}
+
+		// resolve the source placeholder to get the value to match against
+		input := repl.ReplaceAll(h.Source, "")
+
+		// find the first mapping matching the input and return
+		// the requested destination/output value
+		for _, m := range h.Mappings {
+			output := m.Outputs[destIdx]
+			if output == nil {
+				// a nil output means this mapping does not set this destination
+				continue
+			}
+			outputStr := caddy.ToString(output)
+
+			// evaluate regular expression if configured
+			if m.re != nil {
+				var result []byte
+				matches := m.re.FindStringSubmatchIndex(input)
+				if matches == nil {
+					continue
+				}
+				// expand ${1}-style capture-group references in the output
+				result = m.re.ExpandString(result, outputStr, input, matches)
+				return string(result), true
+			}
+
+			// otherwise simple string comparison
+			if input == m.Input {
+				return repl.ReplaceAll(outputStr, ""), true
+			}
+		}
+
+		// fall back to default if no match or if matched nil value
+		if len(h.Defaults) > destIdx {
+			return repl.ReplaceAll(h.Defaults[destIdx], ""), true
+		}
+
+		return nil, true
+	})
+
+	return next.ServeHTTP(w, r)
+}
+
+// Mapping describes a mapping from input to outputs.
+type Mapping struct {
+	// The input value to match. Must be distinct from other mappings.
+	// Mutually exclusive to input_regexp.
+	Input string `json:"input,omitempty"`
+
+	// The input regular expression to match. Mutually exclusive to input.
+	InputRegexp string `json:"input_regexp,omitempty"`
+
+	// Upon a match with the input, each output is positionally correlated
+	// with each destination of the parent handler. An output that is null
+	// (nil) will be treated as if it was not mapped at all.
+	Outputs []any `json:"outputs,omitempty"`
+
+	// re is the compiled form of InputRegexp, populated during Provision.
+	re *regexp.Regexp
+}
+
+// Interface guards: compile-time assertions that Handler implements the
+// lifecycle and middleware interfaces Caddy expects of this module.
+var (
+	_ caddy.Provisioner           = (*Handler)(nil)
+	_ caddy.Validator             = (*Handler)(nil)
+	_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
+)
diff --git a/modules/caddyhttp/map/map_test.go b/modules/caddyhttp/map/map_test.go
new file mode 100644
index 00000000000..3ff5e7115a7
--- /dev/null
+++ b/modules/caddyhttp/map/map_test.go
@@ -0,0 +1,152 @@
+package maphandler
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// TestHandler exercises the map handler end-to-end: it provisions each test
+// case's Handler, runs ServeHTTP with a no-op next handler, then checks that
+// evaluating the destination placeholders yields the expected values.
+func TestHandler(t *testing.T) {
+	for i, tc := range []struct {
+		handler Handler
+		reqURI  string
+		expect  map[string]any
+	}{
+		// simple exact-string input match
+		{
+			reqURI: "/foo",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Mappings: []Mapping{
+					{
+						Input:   "/foo",
+						Outputs: []any{"FOO"},
+					},
+				},
+			},
+			expect: map[string]any{
+				"output": "FOO",
+			},
+		},
+		// regexp input matching a prefix of the path
+		{
+			reqURI: "/abcdef",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Mappings: []Mapping{
+					{
+						InputRegexp: "(/abc)",
+						Outputs:     []any{"ABC"},
+					},
+				},
+			},
+			expect: map[string]any{
+				"output": "ABC",
+			},
+		},
+		// regexp capture group expanded into the output via ${1}
+		{
+			reqURI: "/ABCxyzDEF",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Mappings: []Mapping{
+					{
+						InputRegexp: "(xyz)",
+						Outputs:     []any{"...${1}..."},
+					},
+				},
+			},
+			expect: map[string]any{
+				"output": "...xyz...",
+			},
+		},
+		{
+			// Test case from https://caddy.community/t/map-directive-and-regular-expressions/13866/14?u=matt
+			reqURI: "/?s=0%27+AND+%28SELECT+0+FROM+%28SELECT+count%28%2A%29%2C+CONCAT%28%28SELECT+%40%40version%29%2C+0x23%2C+FLOOR%28RAND%280%29%2A2%29%29+AS+x+FROM+information_schema.columns+GROUP+BY+x%29+y%29+-+-+%27",
+			handler: Handler{
+				Source:       "{http.request.uri}",
+				Destinations: []string{"{output}"},
+				Mappings: []Mapping{
+					{
+						InputRegexp: "(?i)(\\^|`|<|>|%|\\\\|\\{|\\}|\\|)",
+						Outputs:     []any{"3"},
+					},
+				},
+			},
+			expect: map[string]any{
+				"output": "3",
+			},
+		},
+		// an output that is itself a placeholder gets resolved by the replacer
+		{
+			reqURI: "/foo",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Mappings: []Mapping{
+					{
+						Input:   "/foo",
+						Outputs: []any{"{testvar}"},
+					},
+				},
+			},
+			expect: map[string]any{
+				"output": "testing",
+			},
+		},
+		// no mappings at all: the default value is used
+		{
+			reqURI: "/foo",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Defaults:     []string{"default"},
+			},
+			expect: map[string]any{
+				"output": "default",
+			},
+		},
+		// default values may also be placeholders
+		{
+			reqURI: "/foo",
+			handler: Handler{
+				Source:       "{http.request.uri.path}",
+				Destinations: []string{"{output}"},
+				Defaults:     []string{"{testvar}"},
+			},
+			expect: map[string]any{
+				"output": "testing",
+			},
+		},
+	} {
+		if err := tc.handler.Provision(caddy.Context{}); err != nil {
+			t.Fatalf("Test %d: Provisioning handler: %v", i, err)
+		}
+
+		req, err := http.NewRequest(http.MethodGet, tc.reqURI, nil)
+		if err != nil {
+			t.Fatalf("Test %d: Creating request: %v", i, err)
+		}
+		repl := caddyhttp.NewTestReplacer(req)
+		repl.Set("testvar", "testing")
+		ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+		req = req.WithContext(ctx)
+
+		rr := httptest.NewRecorder()
+		noop := caddyhttp.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) error { return nil })
+
+		if err := tc.handler.ServeHTTP(rr, req, noop); err != nil {
+			t.Errorf("Test %d: Handler returned error: %v", i, err)
+			continue
+		}
+
+		for key, expected := range tc.expect {
+			actual, _ := repl.Get(key)
+			if !reflect.DeepEqual(actual, expected) {
+				t.Errorf("Test %d: Expected %#v but got %#v for {%s}", i, expected, actual, key)
+			}
+		}
+	}
+}
diff --git a/modules/caddyhttp/marshalers.go b/modules/caddyhttp/marshalers.go
new file mode 100644
index 00000000000..9bce377f4b0
--- /dev/null
+++ b/modules/caddyhttp/marshalers.go
@@ -0,0 +1,126 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LoggableHTTPRequest makes an HTTP request loggable with zap.Object().
+type LoggableHTTPRequest struct {
+	*http.Request
+
+	// ShouldLogCredentials, if true, causes sensitive headers (Cookie,
+	// Authorization, etc.) to be logged verbatim instead of redacted;
+	// it is forwarded to LoggableHTTPHeader when headers are marshaled.
+	ShouldLogCredentials bool
+}
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+// It emits the remote address (split into IP and port), the client IP
+// variable if set, the request line fields, headers, transfer encoding,
+// and TLS connection state when present.
+func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	ip, port, err := net.SplitHostPort(r.RemoteAddr)
+	if err != nil {
+		// RemoteAddr had no port (or was malformed); log it whole as the IP
+		ip = r.RemoteAddr
+		port = ""
+	}
+
+	enc.AddString("remote_ip", ip)
+	enc.AddString("remote_port", port)
+	if ip, ok := GetVar(r.Context(), ClientIPVarKey).(string); ok {
+		enc.AddString("client_ip", ip)
+	}
+	enc.AddString("proto", r.Proto)
+	enc.AddString("method", r.Method)
+	enc.AddString("host", r.Host)
+	enc.AddString("uri", r.RequestURI)
+	enc.AddObject("headers", LoggableHTTPHeader{
+		Header:               r.Header,
+		ShouldLogCredentials: r.ShouldLogCredentials,
+	})
+	if r.TransferEncoding != nil {
+		enc.AddArray("transfer_encoding", LoggableStringArray(r.TransferEncoding))
+	}
+	if r.TLS != nil {
+		enc.AddObject("tls", LoggableTLSConnState(*r.TLS))
+	}
+	return nil
+}
+
+// LoggableHTTPHeader makes an HTTP header loggable with zap.Object().
+// Headers with potentially sensitive information (Cookie, Set-Cookie,
+// Authorization, and Proxy-Authorization) have their values replaced
+// with "REDACTED" unless ShouldLogCredentials is set.
+type LoggableHTTPHeader struct {
+	http.Header
+
+	// ShouldLogCredentials disables redaction of the sensitive headers above.
+	ShouldLogCredentials bool
+}
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+// Sensitive field values are replaced with "REDACTED" unless the
+// ShouldLogCredentials flag is set.
+func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	if h.Header == nil {
+		return nil
+	}
+	for name, values := range h.Header {
+		if !h.ShouldLogCredentials {
+			switch strings.ToLower(name) {
+			case "cookie", "set-cookie", "authorization", "proxy-authorization":
+				values = []string{"REDACTED"} // see #5669. I still think ▒▒▒▒ would be cool.
+			}
+		}
+		enc.AddArray(name, LoggableStringArray(values))
+	}
+	return nil
+}
+
+// LoggableStringArray makes a slice of strings marshalable for logging.
+type LoggableStringArray []string
+
+// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface by
+// appending each element of the slice to the encoder.
+func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+	if sa == nil {
+		return nil
+	}
+	for i := range sa {
+		enc.AppendString(sa[i])
+	}
+	return nil
+}
+
+// LoggableTLSConnState makes a TLS connection state loggable with zap.Object().
+type LoggableTLSConnState tls.ConnectionState
+
+// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
+// It logs the handshake parameters and, if the peer presented a
+// certificate chain, the leaf certificate's subject CN and serial.
+func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	enc.AddBool("resumed", t.DidResume)
+	enc.AddUint16("version", t.Version)
+	enc.AddUint16("cipher_suite", t.CipherSuite)
+	enc.AddString("proto", t.NegotiatedProtocol)
+	enc.AddString("server_name", t.ServerName)
+	if len(t.PeerCertificates) > 0 {
+		// PeerCertificates[0] is the leaf (client) certificate
+		enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName)
+		enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String())
+	}
+	return nil
+}
+
+// Interface guards: compile-time assertions that the loggable wrapper
+// types implement the zapcore marshaler interfaces they are used as.
+var (
+	_ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil)
+	_ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil)
+	_ zapcore.ArrayMarshaler  = (*LoggableStringArray)(nil)
+	_ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil)
+)
diff --git a/modules/caddyhttp/matchers.go b/modules/caddyhttp/matchers.go
new file mode 100644
index 00000000000..e5ca28b95b6
--- /dev/null
+++ b/modules/caddyhttp/matchers.go
@@ -0,0 +1,1657 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "path"
+ "reflect"
+ "regexp"
+ "runtime"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "golang.org/x/net/idna"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+// The concrete request-matcher types are declared together below; each one
+// is registered as a separate Caddy module in init.
+type (
+	// MatchHost matches requests by the Host value (case-insensitive).
+	//
+	// When used in a top-level HTTP route,
+	// [qualifying domain names](/docs/automatic-https#hostname-requirements)
+	// may trigger [automatic HTTPS](/docs/automatic-https), which automatically
+	// provisions and renews certificates for you. Before doing this, you
+	// should ensure that DNS records for these domains are properly configured,
+	// especially A/AAAA pointed at your server.
+	//
+	// Automatic HTTPS can be
+	// [customized or disabled](/docs/modules/http#servers/automatic_https).
+	//
+	// Wildcards (`*`) may be used to represent exactly one label of the
+	// hostname, in accordance with RFC 1034 (because host matchers are also
+	// used for automatic HTTPS which influences TLS certificates). Thus,
+	// a host of `*` matches hosts like `localhost` or `internal` but not
+	// `example.com`. To catch all hosts, omit the host matcher entirely.
+	//
+	// The wildcard can be useful for matching all subdomains, for example:
+	// `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
+	//
+	// Duplicate entries will return an error.
+	MatchHost []string
+
+	// MatchPath case-insensitively matches requests by the URI's path. Path
+	// matching is exact, not prefix-based, giving you more control and clarity
+	// over matching. Wildcards (`*`) may be used:
+	//
+	// - At the end only, for a prefix match (`/prefix/*`)
+	// - At the beginning only, for a suffix match (`*.suffix`)
+	// - On both sides only, for a substring match (`*/contains/*`)
+	// - In the middle, for a globular match (`/accounts/*/info`)
+	//
+	// Slashes are significant; i.e. `/foo*` matches `/foo`, `/foo/`, `/foo/bar`,
+	// and `/foobar`; but `/foo/*` does not match `/foo` or `/foobar`. Valid
+	// paths start with a slash `/`.
+	//
+	// Because there are, in general, multiple possible escaped forms of any
+	// path, path matchers operate in unescaped space; that is, path matchers
+	// should be written in their unescaped form to prevent ambiguities and
+	// possible security issues, as all request paths will be normalized to
+	// their unescaped forms before matcher evaluation.
+	//
+	// However, escape sequences in a match pattern are supported; they are
+	// compared with the request's raw/escaped path for those bytes only.
+	// In other words, a matcher of `/foo%2Fbar` will match a request path
+	// of precisely `/foo%2Fbar`, but not `/foo/bar`. It follows that matching
+	// the literal percent sign (%) in normalized space can be done using the
+	// escaped form, `%25`.
+	//
+	// Even though wildcards (`*`) operate in the normalized space, the special
+	// escaped wildcard (`%*`), which is not a valid escape sequence, may be
+	// used in place of a span that should NOT be decoded; that is, `/bands/%*`
+	// will match `/bands/AC%2fDC` whereas `/bands/*` will not.
+	//
+	// Even though path matching is done in normalized space, the special
+	// wildcard `%*` may be used in place of a span that should NOT be decoded;
+	// that is, `/bands/%*/` will match `/bands/AC%2fDC/` whereas `/bands/*/`
+	// will not.
+	//
+	// This matcher is fast, so it does not support regular expressions or
+	// capture groups. For slower but more powerful matching, use the
+	// path_regexp matcher. (Note that due to the special treatment of
+	// escape sequences in matcher patterns, they may perform slightly slower
+	// in high-traffic environments.)
+	MatchPath []string
+
+	// MatchPathRE matches requests by a regular expression on the URI's path.
+	// Path matching is performed in the unescaped (decoded) form of the path.
+	//
+	// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
+	// where `name` is the regular expression's name, and `capture_group` is either
+	// the named or positional capture group from the expression itself. If no name
+	// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
+	// (potentially leading to collisions).
+	MatchPathRE struct{ MatchRegexp }
+
+	// MatchMethod matches requests by the method (e.g. GET, POST, DELETE).
+	MatchMethod []string
+
+	// MatchQuery matches requests by the URI's query string. It takes a JSON object
+	// keyed by the query keys, with an array of string values to match for that key.
+	// Query key matches are exact, but wildcards may be used for value matches. Both
+	// keys and values may be placeholders.
+	//
+	// An example of the structure to match `?key=value&topic=api&query=something` is:
+	//
+	// ```json
+	// {
+	// 	"key": ["value"],
+	// 	"topic": ["api"],
+	// 	"query": ["*"]
+	// }
+	// ```
+	//
+	// Invalid query strings, including those with bad escapings or illegal characters
+	// like semicolons, will fail to parse and thus fail to match.
+	//
+	// **NOTE:** Notice that query string values are arrays, not singular values. This is
+	// because repeated keys are valid in query strings, and each one may have a
+	// different value. This matcher will match for a key if any one of its configured
+	// values is assigned in the query string. Backend applications relying on query
+	// strings MUST take into consideration that query string values are arrays and can
+	// have multiple values.
+	MatchQuery url.Values
+
+	// MatchHeader matches requests by header fields. The key is the field
+	// name and the array is the list of field values. It performs fast,
+	// exact string comparisons of the field values. Fast prefix, suffix,
+	// and substring matches can also be done by suffixing, prefixing, or
+	// surrounding the value with the wildcard `*` character, respectively.
+	// If a list is null, the header must not exist. If the list is empty,
+	// the field must simply exist, regardless of its value.
+	//
+	// **NOTE:** Notice that header values are arrays, not singular values. This is
+	// because repeated fields are valid in headers, and each one may have a
+	// different value. This matcher will match for a field if any one of its configured
+	// values matches in the header. Backend applications relying on headers MUST take
+	// into consideration that header field values are arrays and can have multiple
+	// values.
+	MatchHeader http.Header
+
+	// MatchHeaderRE matches requests by a regular expression on header fields.
+	//
+	// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
+	// where `name` is the regular expression's name, and `capture_group` is either
+	// the named or positional capture group from the expression itself. If no name
+	// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
+	// (potentially leading to collisions).
+	MatchHeaderRE map[string]*MatchRegexp
+
+	// MatchProtocol matches requests by protocol. Recognized values are
+	// "http", "https", and "grpc" for broad protocol matches, or specific
+	// HTTP versions can be specified like so: "http/1", "http/1.1",
+	// "http/2", "http/3", or minimum versions: "http/2+", etc.
+	MatchProtocol string
+
+	// MatchTLS matches HTTP requests based on the underlying
+	// TLS connection state. If this matcher is specified but
+	// the request did not come over TLS, it will never match.
+	// If this matcher is specified but is empty and the request
+	// did come in over TLS, it will always match.
+	MatchTLS struct {
+		// Matches if the TLS handshake has completed. QUIC 0-RTT early
+		// data may arrive before the handshake completes. Generally, it
+		// is unsafe to replay these requests if they are not idempotent;
+		// additionally, the remote IP of early data packets can more
+		// easily be spoofed. It is conventional to respond with HTTP 425
+		// Too Early if the request cannot risk being processed in this
+		// state.
+		HandshakeComplete *bool `json:"handshake_complete,omitempty"`
+	}
+
+	// MatchNot matches requests by negating the results of its matcher
+	// sets. A single "not" matcher takes one or more matcher sets. Each
+	// matcher set is OR'ed; in other words, if any matcher set returns
+	// true, the final result of the "not" matcher is false. Individual
+	// matchers within a set work the same (i.e. different matchers in
+	// the same set are AND'ed).
+	//
+	// NOTE: The generated docs which describe the structure of this
+	// module are wrong because of how this type unmarshals JSON in a
+	// custom way. The correct structure is:
+	//
+	// ```json
+	// [
+	// 	{},
+	// 	{}
+	// ]
+	// ```
+	//
+	// where each of the array elements is a matcher set, i.e. an
+	// object keyed by matcher name.
+	MatchNot struct {
+		MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"`
+		MatcherSets    []MatcherSet      `json:"-"`
+	}
+)
+
+// init registers all of the standard request matchers as Caddy modules.
+func init() {
+	caddy.RegisterModule(MatchHost{})
+	caddy.RegisterModule(MatchPath{})
+	caddy.RegisterModule(MatchPathRE{})
+	caddy.RegisterModule(MatchMethod{})
+	caddy.RegisterModule(MatchQuery{})
+	caddy.RegisterModule(MatchHeader{})
+	caddy.RegisterModule(MatchHeaderRE{})
+	caddy.RegisterModule(new(MatchProtocol))
+	caddy.RegisterModule(MatchTLS{})
+	caddy.RegisterModule(MatchNot{})
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchHost) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.host",
+		New: func() caddy.Module { return new(MatchHost) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+// Multiple host matcher tokens are merged into a single list.
+func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	for d.Next() {
+		if hosts := d.RemainingArgs(); len(hosts) > 0 {
+			*m = append(*m, hosts...)
+		}
+		if d.NextBlock(0) {
+			return d.Err("malformed host matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// Provision sets up and validates m, including making it more efficient for large lists.
+func (m MatchHost) Provision(_ caddy.Context) error {
+	// check for duplicates; they are nonsensical and reduce efficiency
+	// (we could just remove them, but the user should know their config is erroneous)
+	seen := make(map[string]int, len(m))
+	for i, host := range m {
+		// normalize internationalized hostnames to their ASCII (punycode) form
+		asciiHost, err := idna.ToASCII(host)
+		if err != nil {
+			return fmt.Errorf("converting hostname '%s' to ASCII: %v", host, err)
+		}
+		if asciiHost != host {
+			m[i] = asciiHost
+		}
+		// duplicates are detected case-insensitively
+		normalizedHost := strings.ToLower(asciiHost)
+		if firstI, ok := seen[normalizedHost]; ok {
+			return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, host)
+		}
+		seen[normalizedHost] = i
+	}
+
+	if m.large() {
+		// sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders)
+		// at the front of the list; this allows us to use binary search for exact matches, which
+		// we have seen from experience is the most common kind of value in large lists; and any
+		// other kinds of values (wildcards and placeholders) are grouped in front so the linear
+		// search should find a match fairly quickly
+		// NOTE: MatchWithError relies on this exact ordering; keep them in sync
+		sort.Slice(m, func(i, j int) bool {
+			iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j])
+			if iInexact && !jInexact {
+				return true
+			}
+			if !iInexact && jInexact {
+				return false
+			}
+			return m[i] < m[j]
+		})
+	}
+
+	return nil
+}
+
+// Match returns true if r matches m. Errors from the
+// underlying evaluation are discarded.
+func (m MatchHost) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchHost) MatchWithError(r *http.Request) (bool, error) {
+	reqHost, _, err := net.SplitHostPort(r.Host)
+	if err != nil {
+		// OK; probably didn't have a port
+		reqHost = r.Host
+
+		// make sure we strip the brackets from IPv6 addresses
+		reqHost = strings.TrimPrefix(reqHost, "[")
+		reqHost = strings.TrimSuffix(reqHost, "]")
+	}
+
+	if m.large() {
+		// fast path: locate exact match using binary search (about 100-1000x faster for large lists)
+		// NOTE(review): this comparison is case-sensitive, while the linear
+		// fallback below uses EqualFold; mixed-case Host headers fall through
+		// to the fuzzy-only scan — confirm this is intended
+		pos := sort.Search(len(m), func(i int) bool {
+			if m.fuzzy(m[i]) {
+				return false
+			}
+			return m[i] >= reqHost
+		})
+		if pos < len(m) && m[pos] == reqHost {
+			return true, nil
+		}
+	}
+
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+outer:
+	for _, host := range m {
+		// fast path: if matcher is large, we already know we don't have an exact
+		// match, so we're only looking for fuzzy match now, which should be at the
+		// front of the list; if we have reached a value that is not fuzzy, there
+		// will be no match and we can short-circuit for efficiency
+		if m.large() && !m.fuzzy(host) {
+			break
+		}
+
+		host = repl.ReplaceAll(host, "")
+		if strings.Contains(host, "*") {
+			// wildcard labels must match 1:1 with the request's host labels
+			patternParts := strings.Split(host, ".")
+			incomingParts := strings.Split(reqHost, ".")
+			if len(patternParts) != len(incomingParts) {
+				continue
+			}
+			for i := range patternParts {
+				if patternParts[i] == "*" {
+					continue
+				}
+				if !strings.EqualFold(patternParts[i], incomingParts[i]) {
+					continue outer
+				}
+			}
+			return true, nil
+		} else if strings.EqualFold(reqHost, host) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression host('localhost')
+func (MatchHost) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		"host",
+		"host_match_request_list",
+		[]*cel.Type{cel.ListType(cel.StringType)},
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			// convert the CEL list argument into a native []string,
+			// then build and provision a regular MatchHost from it
+			refStringList := reflect.TypeOf([]string{})
+			strList, err := data.ConvertToNative(refStringList)
+			if err != nil {
+				return nil, err
+			}
+			matcher := MatchHost(strList.([]string))
+			err = matcher.Provision(ctx)
+			return matcher, err
+		},
+	)
+}
+
+// fuzzy returns true if the given hostname h is not a specific
+// hostname, e.g. has placeholders or wildcards.
+func (MatchHost) fuzzy(h string) bool {
+	return strings.ContainsAny(h, "{*")
+}
+
+// large returns true if m is considered to be large. Optimizing
+// the matcher for smaller lists has diminishing returns.
+// See related benchmark function in test file to conduct experiments.
+func (m MatchHost) large() bool {
+	return len(m) > 100
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchPath) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.path",
+		New: func() caddy.Module { return new(MatchPath) },
+	}
+	return info
+}
+
+// Provision lower-cases the paths in m to ensure case-insensitive matching.
+func (m MatchPath) Provision(_ caddy.Context) error {
+	for i, pattern := range m {
+		if pattern == "*" && i > 0 {
+			// a bare "*" always matches, so hoist it to the front
+			// where it will be found immediately
+			m[0] = pattern
+			break
+		}
+		m[i] = strings.ToLower(pattern)
+	}
+	return nil
+}
+
+// Match returns true if r matches m. Errors from the
+// underlying evaluation are discarded.
+func (m MatchPath) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchPath) MatchWithError(r *http.Request) (bool, error) {
+	// Even though RFC 9110 says that path matching is case-sensitive
+	// (https://www.rfc-editor.org/rfc/rfc9110.html#section-4.2.3),
+	// we do case-insensitive matching to mitigate security issues
+	// related to differences between operating systems, applications,
+	// etc; if case-sensitive matching is needed, the regex matcher
+	// can be used instead.
+	reqPath := strings.ToLower(r.URL.Path)
+
+	// See #2917; Windows ignores trailing dots and spaces
+	// when accessing files (sigh), potentially causing a
+	// security risk (cry) if PHP files end up being served
+	// as static files, exposing the source code, instead of
+	// being matched by *.php to be treated as PHP scripts.
+	if runtime.GOOS == "windows" { // issue #5613
+		reqPath = strings.TrimRight(reqPath, ". ")
+	}
+
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	for _, matchPattern := range m {
+		matchPattern = repl.ReplaceAll(matchPattern, "")
+
+		// special case: whole path is wildcard; this is unnecessary
+		// as it matches all requests, which is the same as no matcher
+		if matchPattern == "*" {
+			return true, nil
+		}
+
+		// Clean the path, merge doubled slashes, etc.
+		// This ensures maliciously crafted requests can't bypass
+		// the path matcher. See #4407. Good security posture
+		// requires that we should do all we can to reduce any
+		// funny-looking paths into "normalized" forms such that
+		// weird variants can't sneak by.
+		//
+		// How we clean the path depends on the kind of pattern:
+		// we either merge slashes or we don't. If the pattern
+		// has double slashes, we preserve them in the path.
+		//
+		// TODO: Despite the fact that the *vast* majority of path
+		// matchers have only 1 pattern, a possible optimization is
+		// to remember the cleaned form of the path for future
+		// iterations; it's just that the way we clean depends on
+		// the kind of pattern.
+
+		mergeSlashes := !strings.Contains(matchPattern, "//")
+
+		// if '%' appears in the match pattern, we interpret that to mean
+		// the intent is to compare that part of the path in raw/escaped
+		// space; i.e. "%40"=="%40", not "@", and "%2F"=="%2F", not "/"
+		if strings.Contains(matchPattern, "%") {
+			reqPathForPattern := CleanPath(r.URL.EscapedPath(), mergeSlashes)
+			if m.matchPatternWithEscapeSequence(reqPathForPattern, matchPattern) {
+				return true, nil
+			}
+
+			// doing prefix/suffix/substring matches doesn't make sense
+			continue
+		}
+
+		reqPathForPattern := CleanPath(reqPath, mergeSlashes)
+
+		// for substring, prefix, and suffix matching, only perform those
+		// special, fast matches if they are the only wildcards in the pattern;
+		// otherwise we assume a globular match if any * appears in the middle
+
+		// special case: first and last characters are wildcard,
+		// treat it as a fast substring match
+		if strings.Count(matchPattern, "*") == 2 &&
+			strings.HasPrefix(matchPattern, "*") &&
+			strings.HasSuffix(matchPattern, "*") {
+			if strings.Contains(reqPathForPattern, matchPattern[1:len(matchPattern)-1]) {
+				return true, nil
+			}
+			continue
+		}
+
+		// only perform prefix/suffix match if it is the only wildcard...
+		// I think that is more correct most of the time
+		if strings.Count(matchPattern, "*") == 1 {
+			// special case: first character is a wildcard,
+			// treat it as a fast suffix match
+			if strings.HasPrefix(matchPattern, "*") {
+				if strings.HasSuffix(reqPathForPattern, matchPattern[1:]) {
+					return true, nil
+				}
+				continue
+			}
+
+			// special case: last character is a wildcard,
+			// treat it as a fast prefix match
+			if strings.HasSuffix(matchPattern, "*") {
+				if strings.HasPrefix(reqPathForPattern, matchPattern[:len(matchPattern)-1]) {
+					return true, nil
+				}
+				continue
+			}
+		}
+
+		// at last, use globular matching, which also is exact matching
+		// if there are no glob/wildcard chars; we ignore the error here
+		// because we can't handle it anyway
+		matches, _ := path.Match(matchPattern, reqPathForPattern)
+		if matches {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// matchPatternWithEscapeSequence reports whether escapedPath (the request
+// path in raw/escaped form) matches matchPath, a pattern containing at
+// least one '%'. Bytes adjacent to a '%' in the pattern are compared in
+// raw/escaped space; the rest of the path is compared in normalized
+// (decoded) space.
+func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) bool {
+	// We would just compare the pattern against r.URL.Path,
+	// but the pattern contains %, indicating that we should
+	// compare at least some part of the path in raw/escaped
+	// space, not normalized space; so we build the string we
+	// will compare against by adding the normalized parts
+	// of the path, then switching to the escaped parts where
+	// the pattern hints to us wherever % is present.
+	var sb strings.Builder
+
+	// iterate the pattern and escaped path in lock-step;
+	// increment iPattern every time we consume a char from the pattern,
+	// increment iPath every time we consume a char from the path;
+	// iPattern and iPath are our cursors/iterator positions for each string
+	var iPattern, iPath int
+	for {
+		if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
+			break
+		}
+
+		// get the next character from the request path
+
+		pathCh := string(escapedPath[iPath])
+		var escapedPathCh string
+
+		// normalize (decode) escape sequences
+		if pathCh == "%" && len(escapedPath) >= iPath+3 {
+			// hold onto this in case we find out the intent is to match in escaped space here;
+			// we lowercase it even though technically the spec says: "For consistency, URI
+			// producers and normalizers should use uppercase hexadecimal digits for all percent-
+			// encodings" (RFC 3986 section 2.1) - we lowercased the matcher pattern earlier in
+			// provisioning so we do the same here to gain case-insensitivity in equivalence;
+			// besides, this string is never shown visibly
+			escapedPathCh = strings.ToLower(escapedPath[iPath : iPath+3])
+
+			var err error
+			pathCh, err = url.PathUnescape(escapedPathCh)
+			if err != nil {
+				// should be impossible unless EscapedPath() is giving us an invalid sequence!
+				return false
+			}
+			iPath += 2 // escape sequence is 2 bytes longer than normal char
+		}
+
+		// now get the next character from the pattern
+
+		normalize := true
+		switch matchPath[iPattern] {
+		case '%':
+			// escape sequence
+
+			// if not a wildcard ("%*"), compare literally; consume next two bytes of pattern
+			if len(matchPath) >= iPattern+3 && matchPath[iPattern+1] != '*' {
+				sb.WriteString(escapedPathCh)
+				iPath++
+				iPattern += 2
+				break
+			}
+
+			// escaped wildcard sequence; consume next byte only ('*')
+			iPattern++
+			normalize = false
+
+			fallthrough
+		case '*':
+			// wildcard, so consume until next matching character
+			remaining := escapedPath[iPath:]
+			until := len(escapedPath) - iPath // go until end of string...
+			if iPattern < len(matchPath)-1 { // ...unless the * is not at the end
+				nextCh := matchPath[iPattern+1]
+				until = strings.IndexByte(remaining, nextCh)
+				if until == -1 {
+					// terminating char of wildcard span not found, so definitely no match
+					return false
+				}
+			}
+			if until == 0 {
+				// empty span; nothing to add on this iteration
+				break
+			}
+			next := remaining[:until]
+			if normalize {
+				var err error
+				next, err = url.PathUnescape(next)
+				if err != nil {
+					return false // should be impossible anyway
+				}
+			}
+			sb.WriteString(next)
+			iPath += until
+		default:
+			sb.WriteString(pathCh)
+			iPath++
+		}
+
+		iPattern++
+	}
+
+	// we can now treat rawpath globs (%*) as regular globs (*)
+	matchPath = strings.ReplaceAll(matchPath, "%*", "*")
+
+	// ignore error here because we can't handle it anyway
+	matches, _ := path.Match(matchPath, sb.String())
+	return matches
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression path('*substring*', '*suffix')
+func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		// macro name: what users write in CEL expressions
+		"path",
+		// function name the macro is rewritten to call
+		"path_match_request_list",
+		// internal data type of the MatchPath value (list of strings)
+		[]*cel.Type{cel.ListType(cel.StringType)},
+		// factory: convert a constant list of strings to a provisioned MatchPath
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			native, err := data.ConvertToNative(reflect.TypeOf([]string{}))
+			if err != nil {
+				return nil, err
+			}
+			matcher := MatchPath(native.([]string))
+			provisionErr := matcher.Provision(ctx)
+			return matcher, provisionErr
+		},
+	)
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// consume every segment so multiple matchers merge into one
+	for d.Next() {
+		args := d.RemainingArgs()
+		*m = append(*m, args...)
+		if d.NextBlock(0) {
+			return d.Err("malformed path matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.path_regexp",
+		New: func() caddy.Module { return new(MatchPathRE) },
+	}
+	return info
+}
+
+// Match returns true if r matches m.
+func (m MatchPathRE) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchPathRE) MatchWithError(r *http.Request) (bool, error) {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	// Normalize the path first (merge doubled slashes, etc.) so that
+	// maliciously crafted requests can't bypass the matcher. See #4407.
+	normalized := cleanPath(r.URL.Path)
+
+	return m.MatchRegexp.Match(normalized, repl), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression path_regexp('^/bar')
+func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+	// overload 1: path_regexp(pattern) — the regexp name defaults
+	// to the enclosing named matcher's name
+	unnamedPattern, err := CELMatcherImpl(
+		"path_regexp",
+		"path_regexp_request_string",
+		[]*cel.Type{cel.StringType},
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			pattern := data.(types.String)
+			matcher := MatchPathRE{MatchRegexp{
+				Name:    ctx.Value(MatcherNameCtxKey).(string),
+				Pattern: string(pattern),
+			}}
+			err := matcher.Provision(ctx)
+			return matcher, err
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	// overload 2: path_regexp(name, pattern) — an explicit regexp
+	// name; an empty name falls back to the named matcher's name
+	namedPattern, err := CELMatcherImpl(
+		"path_regexp",
+		"path_regexp_request_string_string",
+		[]*cel.Type{cel.StringType, cel.StringType},
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			refStringList := reflect.TypeOf([]string{})
+			params, err := data.ConvertToNative(refStringList)
+			if err != nil {
+				return nil, err
+			}
+			strParams := params.([]string)
+			name := strParams[0]
+			if name == "" {
+				name = ctx.Value(MatcherNameCtxKey).(string)
+			}
+			matcher := MatchPathRE{MatchRegexp{
+				Name:    name,
+				Pattern: strParams[1],
+			}}
+			err = matcher.Provision(ctx)
+			return matcher, err
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	// register both overloads under the same macro name
+	envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
+	prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
+	return NewMatcherCELLibrary(envOpts, prgOpts), nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchMethod) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.method",
+		New: func() caddy.Module { return new(MatchMethod) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// consume every segment so multiple matchers merge into one
+	for d.Next() {
+		args := d.RemainingArgs()
+		*m = append(*m, args...)
+		if d.NextBlock(0) {
+			return d.Err("malformed method matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// Match returns true if r matches m.
+func (m MatchMethod) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchMethod) MatchWithError(r *http.Request) (bool, error) {
+	// the request method must be one of the configured methods
+	found := slices.Contains(m, r.Method)
+	return found, nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression method('PUT', 'POST')
+func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		"method",
+		"method_request_list",
+		[]*cel.Type{cel.ListType(cel.StringType)},
+		// factory: turn a constant CEL list of strings into a MatchMethod
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			native, err := data.ConvertToNative(reflect.TypeOf([]string{}))
+			if err != nil {
+				return nil, err
+			}
+			return MatchMethod(native.([]string)), nil
+		},
+	)
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchQuery) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.query",
+		New: func() caddy.Module { return new(MatchQuery) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	if *m == nil {
+		*m = make(map[string][]string)
+	}
+	// consume every segment so multiple matchers merge into one
+	for d.Next() {
+		for _, query := range d.RemainingArgs() {
+			if query == "" {
+				continue
+			}
+			// each token must be in param=val form
+			param, val, ok := strings.Cut(query, "=")
+			if !ok {
+				return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
+			}
+			url.Values(*m).Add(param, val)
+		}
+		if d.NextBlock(0) {
+			return d.Err("malformed query matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// Match returns true if r matches m. An empty m matches an empty query string.
+func (m MatchQuery) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+// An empty m matches an empty query string.
+func (m MatchQuery) MatchWithError(r *http.Request) (bool, error) {
+	// If no query keys are configured, this only
+	// matches an empty query string.
+	if len(m) == 0 {
+		return len(r.URL.Query()) == 0, nil
+	}
+
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+
+	// parse query string just once, for efficiency
+	parsed, err := url.ParseQuery(r.URL.RawQuery)
+	if err != nil {
+		// Illegal query string. Likely bad escape sequence or unescaped literals.
+		// Note that semicolons in query string have a controversial history. Summaries:
+		// - https://github.com/golang/go/issues/50034
+		// - https://github.com/golang/go/issues/25192
+		// Despite the URL WHATWG spec mandating the use of & separators for query strings,
+		// every URL parser implementation is different, and Filippo Valsorda rightly wrote:
+		// "Relying on parser alignment for security is doomed." Overall conclusion is that
+		// splitting on & and rejecting ; in key=value pairs is safer than accepting raw ;.
+		// We regard the Go team's decision as sound and thus reject malformed query strings.
+		return false, nil
+	}
+
+	// Count the amount of matched keys, to ensure we AND
+	// between all configured query keys; all keys must
+	// match at least one value.
+	matchedKeys := 0
+	for param, vals := range m {
+		// configured keys and values may contain placeholders
+		param = repl.ReplaceAll(param, "")
+		paramVal, found := parsed[param]
+		if !found {
+			// a missing key can never satisfy the AND semantics
+			return false, nil
+		}
+		for _, v := range vals {
+			v = repl.ReplaceAll(v, "")
+			// "*" is a wildcard: any value for this key matches
+			if slices.Contains(paramVal, v) || v == "*" {
+				matchedKeys++
+				break
+			}
+		}
+	}
+	return matchedKeys == len(m), nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']})
+func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		"query",
+		"query_matcher_request_map",
+		[]*cel.Type{CELTypeJSON},
+		// factory: convert the CEL map value into query params
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			asMap, err := CELValueToMapStrList(data)
+			if err != nil {
+				return nil, err
+			}
+			return MatchQuery(url.Values(asMap)), nil
+		},
+	)
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchHeader) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.header",
+		New: func() caddy.Module { return new(MatchHeader) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+//
+// A field prefixed with "!" configures a nil value list, which matches
+// only when the header is absent; otherwise each field/value pair is
+// added to the header map (multiple values for one field are OR'ed).
+func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	if *m == nil {
+		*m = make(map[string][]string)
+	}
+	// iterate to merge multiple matchers into one
+	for d.Next() {
+		var field, val string
+		if !d.Args(&field) {
+			return d.Errf("malformed header matcher: expected field")
+		}
+
+		if strings.HasPrefix(field, "!") {
+			if len(field) == 1 {
+				return d.Errf("malformed header matcher: must have field name following ! character")
+			}
+
+			// a nil value list means "match if this header is absent";
+			// maps are reference types, so mutate *m directly (the prior
+			// code copied the map header to a local and re-assigned the
+			// receiver pointer, which was a no-op self-assignment)
+			field = field[1:]
+			(*m)[field] = nil
+			if d.NextArg() {
+				return d.Errf("malformed header matcher: null matching headers cannot have a field value")
+			}
+		} else {
+			if !d.NextArg() {
+				return d.Errf("malformed header matcher: expected both field and value")
+			}
+
+			// If multiple header matchers with the same header field are defined,
+			// we want to add the existing to the list of headers (will be OR'ed)
+			val = d.Val()
+			http.Header(*m).Add(field, val)
+		}
+
+		if d.NextBlock(0) {
+			return d.Err("malformed header matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// Match returns true if r matches m.
+func (m MatchHeader) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchHeader) MatchWithError(r *http.Request) (bool, error) {
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+	matched := matchHeaders(r.Header, http.Header(m), r.Host, r.TransferEncoding, repl)
+	return matched, nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression header({'content-type': 'image/png'})
+//	expression header({'foo': ['bar', 'baz']}) // match bar or baz
+func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		"header",
+		"header_matcher_request_map",
+		[]*cel.Type{CELTypeJSON},
+		// factory: convert the CEL map value into a header map
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			asMap, err := CELValueToMapStrList(data)
+			if err != nil {
+				return nil, err
+			}
+			return MatchHeader(http.Header(asMap)), nil
+		},
+	)
+}
+
+// getHeaderFieldVals returns the field values for the given fieldName from input.
+// The host parameter should be obtained from the http.Request.Host field, and the
+// transferEncoding from http.Request.TransferEncoding, since net/http removes them
+// from the header map.
+func getHeaderFieldVals(input http.Header, fieldName, host string, transferEncoding []string) []string {
+	switch fieldName = textproto.CanonicalMIMEHeaderKey(fieldName); fieldName {
+	case "Host":
+		// a non-empty Host takes priority over anything in the map
+		if host != "" {
+			return []string{host}
+		}
+	case "Transfer-Encoding":
+		// fall back to the request's TransferEncoding field
+		if input[fieldName] == nil {
+			return transferEncoding
+		}
+	}
+	return input[fieldName]
+}
+
+// matchHeaders returns true if input matches the criteria in against without regex.
+// The host parameter should be obtained from the http.Request.Host field since
+// net/http removes it from the header map.
+//
+// Per configured field: a nil value list matches only if the header is
+// absent; a non-nil but empty list matches if the header is present at
+// all; otherwise at least one allowed value must match the field (with
+// "*" supporting any/prefix/suffix/substring forms). All configured
+// fields must match (AND semantics).
+func matchHeaders(input, against http.Header, host string, transferEncoding []string, repl *caddy.Replacer) bool {
+	for field, allowedFieldVals := range against {
+		actualFieldVals := getHeaderFieldVals(input, field, host, transferEncoding)
+		if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil {
+			// a non-nil but empty list of allowed values means
+			// match if the header field exists at all
+			continue
+		}
+		if allowedFieldVals == nil && actualFieldVals == nil {
+			// a nil list means match if the header does not exist at all
+			continue
+		}
+		var match bool
+	fieldVals:
+		for _, actualFieldVal := range actualFieldVals {
+			for _, allowedFieldVal := range allowedFieldVals {
+				if repl != nil {
+					// allowed values may contain placeholders
+					allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "")
+				}
+				switch {
+				case allowedFieldVal == "*":
+					match = true
+				case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"):
+					match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1])
+				case strings.HasPrefix(allowedFieldVal, "*"):
+					match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:])
+				case strings.HasSuffix(allowedFieldVal, "*"):
+					match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1])
+				default:
+					match = actualFieldVal == allowedFieldVal
+				}
+				if match {
+					break fieldVals
+				}
+			}
+		}
+		if !match {
+			return false
+		}
+	}
+	return true
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.header_regexp",
+		New: func() caddy.Module { return new(MatchHeaderRE) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	if *m == nil {
+		*m = make(map[string]*MatchRegexp)
+	}
+	// iterate to merge multiple matchers into one
+	for d.Next() {
+		var first, second, third string
+		if !d.Args(&first, &second) {
+			return d.ArgErr()
+		}
+
+		// two args: <field> <pattern>; three args: <name> <field> <pattern>
+		var name, field, val string
+		if d.Args(&third) {
+			name, field, val = first, second, third
+		} else {
+			field, val = first, second
+		}
+
+		// Default to the named matcher's name, if no regexp name is provided
+		if name == "" {
+			name = d.GetContextString(caddyfile.MatcherNameCtxKey)
+		}
+
+		// only one pattern per header field is allowed, since a second
+		// one would overwrite the first
+		if (*m)[field] != nil {
+			return d.Errf("header_regexp matcher can only be used once per named matcher, per header field: %s", field)
+		}
+		(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
+
+		if d.NextBlock(0) {
+			return d.Err("malformed header_regexp matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// Match returns true if r matches m.
+func (m MatchHeaderRE) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m. Every configured header
+// field must have at least one value matching its regexp (AND across
+// fields, OR across values of a field).
+func (m MatchHeaderRE) MatchWithError(r *http.Request) (bool, error) {
+	// the replacer is loop-invariant; look it up once instead of
+	// once per header value as before
+	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+	for field, rm := range m {
+		actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host, r.TransferEncoding)
+		match := false
+		for _, actualFieldVal := range actualFieldVals {
+			if rm.Match(actualFieldVal, repl) {
+				match = true
+				break
+			}
+		}
+		if !match {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// Provision compiles m's regular expressions.
+func (m MatchHeaderRE) Provision(ctx caddy.Context) error {
+	for _, rm := range m {
+		if err := rm.Provision(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate validates m's regular expressions.
+func (m MatchHeaderRE) Validate() error {
+	for _, rm := range m {
+		if err := rm.Validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression header_regexp('foo', 'Field', 'fo+')
+func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
+	// overload 1: header_regexp(field, pattern) — the regexp name
+	// defaults to the enclosing named matcher's name
+	unnamedPattern, err := CELMatcherImpl(
+		"header_regexp",
+		"header_regexp_request_string_string",
+		[]*cel.Type{cel.StringType, cel.StringType},
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			refStringList := reflect.TypeOf([]string{})
+			params, err := data.ConvertToNative(refStringList)
+			if err != nil {
+				return nil, err
+			}
+			strParams := params.([]string)
+			matcher := MatchHeaderRE{}
+			matcher[strParams[0]] = &MatchRegexp{
+				Pattern: strParams[1],
+				Name:    ctx.Value(MatcherNameCtxKey).(string),
+			}
+			err = matcher.Provision(ctx)
+			return matcher, err
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	// overload 2: header_regexp(name, field, pattern) — an explicit
+	// regexp name; an empty name falls back to the matcher's name
+	namedPattern, err := CELMatcherImpl(
+		"header_regexp",
+		"header_regexp_request_string_string_string",
+		[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			refStringList := reflect.TypeOf([]string{})
+			params, err := data.ConvertToNative(refStringList)
+			if err != nil {
+				return nil, err
+			}
+			strParams := params.([]string)
+			name := strParams[0]
+			if name == "" {
+				name = ctx.Value(MatcherNameCtxKey).(string)
+			}
+			matcher := MatchHeaderRE{}
+			matcher[strParams[1]] = &MatchRegexp{
+				Pattern: strParams[2],
+				Name:    name,
+			}
+			err = matcher.Provision(ctx)
+			return matcher, err
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	// register both overloads under the same macro name
+	envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
+	prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
+	return NewMatcherCELLibrary(envOpts, prgOpts), nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.protocol",
+		New: func() caddy.Module { return new(MatchProtocol) },
+	}
+	return info
+}
+
+// Match returns true if r matches m.
+func (m MatchProtocol) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchProtocol) MatchWithError(r *http.Request) (bool, error) {
+ switch string(m) {
+ case "grpc":
+ return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc"), nil
+ case "https":
+ return r.TLS != nil, nil
+ case "http":
+ return r.TLS == nil, nil
+ case "http/1.0":
+ return r.ProtoMajor == 1 && r.ProtoMinor == 0, nil
+ case "http/1.0+":
+ return r.ProtoAtLeast(1, 0), nil
+ case "http/1.1":
+ return r.ProtoMajor == 1 && r.ProtoMinor == 1, nil
+ case "http/1.1+":
+ return r.ProtoAtLeast(1, 1), nil
+ case "http/2":
+ return r.ProtoMajor == 2, nil
+ case "http/2+":
+ return r.ProtoAtLeast(2, 0), nil
+ case "http/3":
+ return r.ProtoMajor == 3, nil
+ case "http/3+":
+ return r.ProtoAtLeast(3, 0), nil
+ }
+ return false, nil
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ var proto string
+ if !d.Args(&proto) {
+ return d.Err("expected exactly one protocol")
+ }
+ *m = MatchProtocol(proto)
+ }
+ return nil
+}
+
+// CELLibrary produces options that expose this matcher for use in CEL
+// expression matchers.
+//
+// Example:
+//
+//	expression protocol('https')
+func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) {
+	return CELMatcherImpl(
+		"protocol",
+		"protocol_request_string",
+		[]*cel.Type{cel.StringType},
+		// factory: lowercase the constant string and wrap it
+		func(data ref.Val) (RequestMatcherWithError, error) {
+			s, ok := data.(types.String)
+			if !ok {
+				return nil, errors.New("protocol argument was not a string")
+			}
+			return MatchProtocol(strings.ToLower(string(s))), nil
+		},
+	)
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchTLS) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.tls",
+		New: func() caddy.Module { return new(MatchTLS) },
+	}
+	return info
+}
+
+// Match returns true if r matches m.
+func (m MatchTLS) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m.
+func (m MatchTLS) MatchWithError(r *http.Request) (bool, error) {
+	// non-TLS requests never match
+	if r.TLS == nil {
+		return false, nil
+	}
+	// if configured, the handshake-complete state must agree exactly
+	// (equivalent to the original (!a && b) || (a && !b) check)
+	if m.HandshakeComplete != nil && *m.HandshakeComplete != r.TLS.HandshakeComplete {
+		return false, nil
+	}
+	return true, nil
+}
+
+// UnmarshalCaddyfile parses Caddyfile tokens for this matcher. Syntax:
+//
+// ... tls [early_data]
+//
+// EXPERIMENTAL SYNTAX: Subject to change.
+func (m *MatchTLS) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ // iterate to merge multiple matchers into one
+ for d.Next() {
+ if d.NextArg() {
+ switch d.Val() {
+ case "early_data":
+ var false bool
+ m.HandshakeComplete = &false
+ }
+ }
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+ if d.NextBlock(0) {
+ return d.Err("malformed tls matcher: blocks are not supported yet")
+ }
+ }
+ return nil
+}
+
+// CaddyModule returns the Caddy module information.
+func (MatchNot) CaddyModule() caddy.ModuleInfo {
+	info := caddy.ModuleInfo{
+		ID:  "http.matchers.not",
+		New: func() caddy.Module { return new(MatchNot) },
+	}
+	return info
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// each segment contributes one nested matcher set
+	for d.Next() {
+		set, err := ParseCaddyfileNestedMatcherSet(d)
+		if err != nil {
+			return err
+		}
+		m.MatcherSetsRaw = append(m.MatcherSetsRaw, set)
+	}
+	return nil
+}
+
+// UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON
+// bytes directly into m's MatcherSetsRaw field.
+func (m *MatchNot) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &m.MatcherSetsRaw); err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalJSON satisfies json.Marshaler by marshaling
+// m's raw matcher sets.
+func (m MatchNot) MarshalJSON() ([]byte, error) {
+	out, err := json.Marshal(m.MatcherSetsRaw)
+	return out, err
+}
+
+// Provision loads the matcher modules to be negated.
+func (m *MatchNot) Provision(ctx caddy.Context) error {
+	matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw")
+	if err != nil {
+		return fmt.Errorf("loading matcher sets: %v", err)
+	}
+	for _, modMap := range matcherSets.([]map[string]any) {
+		var ms MatcherSet
+		for _, modIface := range modMap {
+			// prefer the error-capable interface; it is checked first
+			// so matchers implementing both are used through it
+			if mod, ok := modIface.(RequestMatcherWithError); ok {
+				ms = append(ms, mod)
+				continue
+			}
+			// fall back to the legacy interface without error returns
+			if mod, ok := modIface.(RequestMatcher); ok {
+				ms = append(ms, mod)
+				continue
+			}
+			return fmt.Errorf("module is not a request matcher: %T", modIface)
+		}
+		m.MatcherSets = append(m.MatcherSets, ms)
+	}
+	return nil
+}
+
+// Match returns true if r matches m. Since this matcher negates
+// the embedded matchers, false is returned if any of its matcher
+// sets return true.
+func (m MatchNot) Match(r *http.Request) bool {
+	ok, _ := m.MatchWithError(r)
+	return ok
+}
+
+// MatchWithError returns true if r matches m. Since this matcher
+// negates the embedded matchers, false is returned if any of its
+// matcher sets return true.
+func (m MatchNot) MatchWithError(r *http.Request) (bool, error) {
+	for _, set := range m.MatcherSets {
+		hit, err := set.MatchWithError(r)
+		if err != nil {
+			return false, err
+		}
+		if hit {
+			// one embedded set matched, so the negation fails
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// MatchRegexp is an embeddable type for matching
+// using regular expressions. It adds placeholders
+// to the request's replacer.
+type MatchRegexp struct {
+	// A unique name for this regular expression. Optional,
+	// but useful to prevent overwriting captures from other
+	// regexp matchers.
+	Name string `json:"name,omitempty"`
+
+	// The regular expression to evaluate, in RE2 syntax,
+	// which is the same general syntax used by Go, Perl,
+	// and Python. For details, see
+	// [Go's regexp package](https://golang.org/pkg/regexp/).
+	// Captures are accessible via placeholders. Unnamed
+	// capture groups are exposed as their numeric, 1-based
+	// index, while named capture groups are available by
+	// the capture group name.
+	Pattern string `json:"pattern"`
+
+	// the compiled form of Pattern; set by Provision
+	compiled *regexp.Regexp
+}
+
+// Provision compiles the regular expression.
+func (mre *MatchRegexp) Provision(caddy.Context) error {
+	compiled, err := regexp.Compile(mre.Pattern)
+	if err != nil {
+		return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
+	}
+	mre.compiled = compiled
+	return nil
+}
+
+// Validate ensures mre is set up correctly.
+func (mre *MatchRegexp) Validate() error {
+	// an empty name is allowed; a non-empty one must satisfy wordRE
+	if mre.Name == "" || wordRE.MatchString(mre.Name) {
+		return nil
+	}
+	return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
+}
+
+// Match returns true if input matches the compiled regular
+// expression in mre. It sets values on the replacer repl
+// associated with capture groups, using the given scope
+// (namespace).
+//
+// Capture groups are stored under "http.regexp.<index>" and, when
+// mre.Name is set, also under "http.regexp.<name>.<index>"; named
+// capture groups are additionally stored by their group name.
+func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
+	matches := mre.compiled.FindStringSubmatch(input)
+	if matches == nil {
+		return false
+	}
+
+	// save all capture groups, first by index
+	for i, match := range matches {
+		keySuffix := "." + strconv.Itoa(i)
+		if mre.Name != "" {
+			repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, match)
+		}
+		repl.Set(regexpPlaceholderPrefix+keySuffix, match)
+	}
+
+	// then by name
+	for i, name := range mre.compiled.SubexpNames() {
+		// skip the first element (the full match), and empty names
+		if i == 0 || name == "" {
+			continue
+		}
+
+		keySuffix := "." + name
+		if mre.Name != "" {
+			repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, matches[i])
+		}
+		repl.Set(regexpPlaceholderPrefix+keySuffix, matches[i])
+	}
+
+	return true
+}
+
+// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
+func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+	// iterate to merge multiple matchers into one
+	for d.Next() {
+		// a non-empty pattern means this is the second occurrence
+		// of the matcher, which would overwrite the first
+		if mre.Pattern != "" {
+			return d.Err("regular expression can only be used once per named matcher")
+		}
+
+		args := d.RemainingArgs()
+		switch len(args) {
+		case 1:
+			mre.Pattern = args[0]
+		case 2:
+			mre.Name, mre.Pattern = args[0], args[1]
+		default:
+			return d.ArgErr()
+		}
+
+		// Default to the named matcher's name, if no regexp name is provided
+		if mre.Name == "" {
+			mre.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
+		}
+
+		if d.NextBlock(0) {
+			return d.Err("malformed path_regexp matcher: blocks are not supported")
+		}
+	}
+	return nil
+}
+
+// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
+// matcher set, and returns its raw module map value.
+func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
+	matcherMap := make(map[string]any)
+
+	// in case there are multiple instances of the same matcher, concatenate
+	// their tokens (we expect that UnmarshalCaddyfile should be able to
+	// handle more than one segment); otherwise, we'd overwrite other
+	// instances of the matcher in this set
+	tokensByMatcherName := make(map[string][]caddyfile.Token)
+	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
+		matcherName := d.Val()
+		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
+	}
+
+	// instantiate and unmarshal each matcher module from its tokens
+	for matcherName, tokens := range tokensByMatcherName {
+		mod, err := caddy.GetModule("http.matchers." + matcherName)
+		if err != nil {
+			return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
+		}
+		unm, ok := mod.New().(caddyfile.Unmarshaler)
+		if !ok {
+			return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
+		}
+		err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
+		if err != nil {
+			return nil, err
+		}
+		// accept either matcher interface; the error-capable one first
+		if rm, ok := unm.(RequestMatcherWithError); ok {
+			matcherMap[matcherName] = rm
+			continue
+		}
+		if rm, ok := unm.(RequestMatcher); ok {
+			matcherMap[matcherName] = rm
+			continue
+		}
+		return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
+	}
+
+	// we should now have a functional matcher, but we also
+	// need to be able to marshal as JSON, otherwise config
+	// adaptation will be missing the matchers!
+	matcherSet := make(caddy.ModuleMap)
+	for name, matcher := range matcherMap {
+		jsonBytes, err := json.Marshal(matcher)
+		if err != nil {
+			return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
+		}
+		matcherSet[name] = jsonBytes
+	}
+
+	return matcherSet, nil
+}
+
+// wordRE matches one or more "word" characters (letters, digits, underscore).
+// NOTE(review): the pattern is unanchored, so MatchString returns true for
+// any string *containing* a word character; MatchRegexp.Validate's message
+// ("must contain only word characters") implies a stricter `^\w+$` check.
+// Not tightened here since existing regexp names may rely on the looser
+// behavior — confirm intent before changing.
+var wordRE = regexp.MustCompile(`\w+`)
+
+// regexpPlaceholderPrefix is the replacer namespace under which regexp
+// capture groups are stored (e.g. "http.regexp.<name>.<index>").
+const regexpPlaceholderPrefix = "http.regexp"
+
+// MatcherErrorVarKey is the key used for the variable that
+// holds an optional error emitted from a request matcher,
+// to short-circuit the handler chain, since matchers cannot
+// return errors via the RequestMatcher interface.
+//
+// Deprecated: Matchers should implement RequestMatcherWithError
+// which can return an error directly, instead of smuggling it
+// through the vars map.
+const MatcherErrorVarKey = "matchers.error"
+
+// Interface guards: compile-time assertions that each matcher type
+// implements the interfaces the HTTP app relies on.
+var (
+	_ RequestMatcherWithError = (*MatchHost)(nil)
+	_ caddy.Provisioner       = (*MatchHost)(nil)
+	_ RequestMatcherWithError = (*MatchPath)(nil)
+	_ RequestMatcherWithError = (*MatchPathRE)(nil)
+	_ caddy.Provisioner       = (*MatchPathRE)(nil)
+	_ RequestMatcherWithError = (*MatchMethod)(nil)
+	_ RequestMatcherWithError = (*MatchQuery)(nil)
+	_ RequestMatcherWithError = (*MatchHeader)(nil)
+	_ RequestMatcherWithError = (*MatchHeaderRE)(nil)
+	_ caddy.Provisioner       = (*MatchHeaderRE)(nil)
+	_ RequestMatcherWithError = (*MatchProtocol)(nil)
+	_ RequestMatcherWithError = (*MatchNot)(nil)
+	_ caddy.Provisioner       = (*MatchNot)(nil)
+	_ caddy.Provisioner       = (*MatchRegexp)(nil)
+
+	_ caddyfile.Unmarshaler = (*MatchHost)(nil)
+	_ caddyfile.Unmarshaler = (*MatchPath)(nil)
+	_ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
+	_ caddyfile.Unmarshaler = (*MatchMethod)(nil)
+	_ caddyfile.Unmarshaler = (*MatchQuery)(nil)
+	_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
+	_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
+	_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
+	_ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
+	_ caddyfile.Unmarshaler = (*MatchVarsRE)(nil)
+
+	_ CELLibraryProducer = (*MatchHost)(nil)
+	_ CELLibraryProducer = (*MatchPath)(nil)
+	_ CELLibraryProducer = (*MatchPathRE)(nil)
+	_ CELLibraryProducer = (*MatchMethod)(nil)
+	_ CELLibraryProducer = (*MatchQuery)(nil)
+	_ CELLibraryProducer = (*MatchHeader)(nil)
+	_ CELLibraryProducer = (*MatchHeaderRE)(nil)
+	_ CELLibraryProducer = (*MatchProtocol)(nil)
+	_ CELLibraryProducer = (*VarsMatcher)(nil)
+	_ CELLibraryProducer = (*MatchVarsRE)(nil)
+
+	_ json.Marshaler   = (*MatchNot)(nil)
+	_ json.Unmarshaler = (*MatchNot)(nil)
+)
diff --git a/modules/caddyhttp/matchers_test.go b/modules/caddyhttp/matchers_test.go
new file mode 100644
index 00000000000..f7be6909efc
--- /dev/null
+++ b/modules/caddyhttp/matchers_test.go
@@ -0,0 +1,1219 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+func TestHostMatcher(t *testing.T) {
+ err := os.Setenv("GO_BENCHMARK_DOMAIN", "localhost")
+ if err != nil {
+ t.Errorf("error while setting up environment: %v", err)
+ }
+
+ for i, tc := range []struct {
+ match MatchHost
+ input string
+ expect bool
+ }{
+ {
+ match: MatchHost{},
+ input: "example.com",
+ expect: false,
+ },
+ {
+ match: MatchHost{"example.com"},
+ input: "example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"EXAMPLE.COM"},
+ input: "example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"example.com"},
+ input: "EXAMPLE.COM",
+ expect: true,
+ },
+ {
+ match: MatchHost{"example.com"},
+ input: "foo.example.com",
+ expect: false,
+ },
+ {
+ match: MatchHost{"example.com"},
+ input: "EXAMPLE.COM",
+ expect: true,
+ },
+ {
+ match: MatchHost{"foo.example.com"},
+ input: "foo.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"foo.example.com"},
+ input: "bar.example.com",
+ expect: false,
+ },
+ {
+ match: MatchHost{"éxàmplê.com"},
+ input: "xn--xmpl-0na6cm.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.com"},
+ input: "example.com",
+ expect: false,
+ },
+ {
+ match: MatchHost{"*.example.com"},
+ input: "SUB.EXAMPLE.COM",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.com"},
+ input: "foo.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.com"},
+ input: "foo.bar.example.com",
+ expect: false,
+ },
+ {
+ match: MatchHost{"*.example.com", "example.net"},
+ input: "example.net",
+ expect: true,
+ },
+ {
+ match: MatchHost{"example.net", "*.example.com"},
+ input: "foo.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.net", "*.*.example.com"},
+ input: "foo.bar.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.net", "sub.*.example.com"},
+ input: "sub.foo.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"*.example.net", "sub.*.example.com"},
+ input: "sub.foo.example.net",
+ expect: false,
+ },
+ {
+ match: MatchHost{"www.*.*"},
+ input: "www.example.com",
+ expect: true,
+ },
+ {
+ match: MatchHost{"example.com"},
+ input: "example.com:5555",
+ expect: true,
+ },
+ {
+ match: MatchHost{"{env.GO_BENCHMARK_DOMAIN}"},
+ input: "localhost",
+ expect: true,
+ },
+ {
+ match: MatchHost{"{env.GO_NONEXISTENT}"},
+ input: "localhost",
+ expect: false,
+ },
+ } {
+ req := &http.Request{Host: tc.input}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ if err := tc.match.Provision(caddy.Context{}); err != nil {
+ t.Errorf("Test %d %v: provisioning failed: %v", i, tc.match, err)
+ }
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
+ continue
+ }
+ }
+}
+
+func TestPathMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ match MatchPath // not URI-encoded because not parsing from a URI
+ input string // should be valid URI encoding (escaped) since it will become part of a request
+ expect bool
+ provisionErr bool
+ }{
+ {
+ match: MatchPath{},
+ input: "/",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/"},
+ input: "/",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/bar"},
+ input: "/",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo/bar"},
+ input: "/foo/bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/bar/"},
+ input: "/foo/bar",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo/bar/"},
+ input: "/foo/bar/",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/bar/", "/other"},
+ input: "/other/",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo/bar/", "/other"},
+ input: "/other",
+ expect: true,
+ },
+ {
+ match: MatchPath{"*.ext"},
+ input: "/foo/bar.ext",
+ expect: true,
+ },
+ {
+ match: MatchPath{"*.php"},
+ input: "/index.PHP",
+ expect: true,
+ },
+ {
+ match: MatchPath{"*.ext"},
+ input: "/foo/bar.ext",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/*/baz"},
+ input: "/foo/bar/baz",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/*/baz/bam"},
+ input: "/foo/bar/bam",
+ expect: false,
+ },
+ {
+ match: MatchPath{"*substring*"},
+ input: "/foo/substring/bar.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo"},
+ input: "/foo/bar",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo"},
+ input: "/foo/bar",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo"},
+ input: "/FOO",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo*"},
+ input: "/FOOOO",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/bar.txt"},
+ input: "/foo/BAR.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo*"},
+ input: "//foo/bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo"},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPath{"//foo"},
+ input: "/foo",
+ expect: false,
+ },
+ {
+ match: MatchPath{"//foo"},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo//*"},
+ input: "/foo//bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo//*"},
+ input: "/foo/%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%2F*"},
+ input: "/foo/%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%2F*"},
+ input: "/foo//bar",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo//bar"},
+ input: "/foo//bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/*//bar"},
+ input: "/foo///bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%*//bar"},
+ input: "/foo///bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo/%*//bar"},
+ input: "/foo//%2Fbar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo*"},
+ input: "/%2F/foo",
+ expect: true,
+ },
+ {
+ match: MatchPath{"*"},
+ input: "/",
+ expect: true,
+ },
+ {
+ match: MatchPath{"*"},
+ input: "/foo/bar",
+ expect: true,
+ },
+ {
+ match: MatchPath{"**"},
+ input: "/",
+ expect: true,
+ },
+ {
+ match: MatchPath{"**"},
+ input: "/foo/bar",
+ expect: true,
+ },
+ // notice these next three test cases are the same normalized path but are written differently
+ {
+ match: MatchPath{"/%25@.txt"},
+ input: "/%25@.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/%25@.txt"},
+ input: "/%25%40.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/%25%40.txt"},
+ input: "/%25%40.txt",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/bands/*/*"},
+ input: "/bands/AC%2FDC/T.N.T",
+ expect: false, // because * operates in normalized space
+ },
+ {
+ match: MatchPath{"/bands/%*/%*"},
+ input: "/bands/AC%2FDC/T.N.T",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/bands/%*/%*"},
+ input: "/bands/AC/DC/T.N.T",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/bands/%*"},
+ input: "/bands/AC/DC",
+ expect: false, // not a suffix match
+ },
+ {
+ match: MatchPath{"/bands/%*"},
+ input: "/bands/AC%2FDC",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo%2fbar/baz"},
+ input: "/foo%2Fbar/baz",
+ expect: true,
+ },
+ {
+ match: MatchPath{"/foo%2fbar/baz"},
+ input: "/foo/bar/baz",
+ expect: false,
+ },
+ {
+ match: MatchPath{"/foo/bar/baz"},
+ input: "/foo%2fbar/baz",
+ expect: true,
+ },
+ } {
+ err := tc.match.Provision(caddy.Context{})
+ if err == nil && tc.provisionErr {
+ t.Errorf("Test %d %v: Expected error provisioning, but there was no error", i, tc.match)
+ }
+ if err != nil && !tc.provisionErr {
+ t.Errorf("Test %d %v: Expected no error provisioning, but there was an error: %v", i, tc.match, err)
+ }
+ if tc.provisionErr {
+ continue // if it's not supposed to provision properly, pointless to test it
+ }
+
+ u, err := url.ParseRequestURI(tc.input)
+ if err != nil {
+ t.Fatalf("Test %d (%v): Invalid request URI (should be rejected by Go's HTTP server): %v", i, tc.input, err)
+ }
+ req := &http.Request{URL: u}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
+ continue
+ }
+ }
+}
+
+func TestPathMatcherWindows(t *testing.T) {
+ // only Windows has this bug where it will ignore
+ // trailing dots and spaces in a filename
+ if runtime.GOOS != "windows" {
+ return
+ }
+
+ req := &http.Request{URL: &url.URL{Path: "/index.php . . .."}}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ match := MatchPath{"*.php"}
+ matched, err := match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Expected no error, but got: %v", err)
+ }
+ if !matched {
+ t.Errorf("Expected to match; should ignore trailing dots and spaces")
+ }
+}
+
+func TestPathREMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ match MatchPathRE
+ input string
+ expect bool
+ expectRepl map[string]string
+ }{
+ {
+ match: MatchPathRE{},
+ input: "/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "/"}},
+ input: "/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "/foo",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "/foo/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "//foo",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "//foo/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}},
+ input: "/%2F/foo/",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "/bar"}},
+ input: "/foo/",
+ expect: false,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/bar"}},
+ input: "/foo/bar",
+ expect: false,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo/(.*)/baz$", Name: "name"}},
+ input: "/foo/bar/baz",
+ expect: true,
+ expectRepl: map[string]string{"name.1": "bar"},
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/foo/(?P<myparam>.*)/baz$", Name: "name"}},
+ input: "/foo/bar/baz",
+ expect: true,
+ expectRepl: map[string]string{"name.myparam": "bar"},
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/%@.txt"}},
+ input: "/%25@.txt",
+ expect: true,
+ },
+ {
+ match: MatchPathRE{MatchRegexp{Pattern: "^/%25@.txt"}},
+ input: "/%25@.txt",
+ expect: false,
+ },
+ } {
+ // compile the regexp and validate its name
+ err := tc.match.Provision(caddy.Context{})
+ if err != nil {
+ t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err)
+ continue
+ }
+ err = tc.match.Validate()
+ if err != nil {
+ t.Errorf("Test %d %v: Validating: %v", i, tc.match, err)
+ continue
+ }
+
+ // set up the fake request and its Replacer
+ u, err := url.ParseRequestURI(tc.input)
+ if err != nil {
+ t.Fatalf("Test %d: Bad input URI: %v", i, err)
+ }
+ req := &http.Request{URL: u}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
+ i, tc.match.Pattern, tc.expect, actual, tc.input)
+ continue
+ }
+
+ for key, expectVal := range tc.expectRepl {
+ placeholder := fmt.Sprintf("{http.regexp.%s}", key)
+ actualVal := repl.ReplaceAll(placeholder, "")
+ if actualVal != expectVal {
+ t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'",
+ i, tc.match.Pattern, key, expectVal, actualVal)
+ continue
+ }
+ }
+ }
+}
+
+func TestHeaderMatcher(t *testing.T) {
+ repl := caddy.NewReplacer()
+ repl.Set("a", "foobar")
+
+ for i, tc := range []struct {
+ match MatchHeader
+ input http.Header // make sure these are canonical cased (std lib will do that in a real request)
+ host string
+ expect bool
+ }{
+ {
+ match: MatchHeader{"Field": []string{"foo"}},
+ input: http.Header{"Field": []string{"foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field": []string{"foo", "bar"}},
+ input: http.Header{"Field": []string{"bar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field": []string{"foo", "bar"}},
+ input: http.Header{"Alakazam": []string{"kapow"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Field": []string{"foo", "bar"}},
+ input: http.Header{"Field": []string{"kapow"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Field": []string{"foo", "bar"}},
+ input: http.Header{"Field": []string{"kapow", "foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"foo"}, "Field2": []string{"bar"}},
+ input: http.Header{"Field1": []string{"foo"}, "Field2": []string{"bar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"field1": []string{"foo"}, "field2": []string{"bar"}},
+ input: http.Header{"Field1": []string{"foo"}, "Field2": []string{"bar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"field1": []string{"foo"}, "field2": []string{"bar"}},
+ input: http.Header{"Field1": []string{"foo"}, "Field2": []string{"kapow"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"field1": []string{"*"}},
+ input: http.Header{"Field1": []string{"foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"field1": []string{"*"}},
+ input: http.Header{"Field2": []string{"foo"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"foo*"}},
+ input: http.Header{"Field1": []string{"foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"foo*"}},
+ input: http.Header{"Field1": []string{"asdf", "foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Field1": []string{"*bar"}},
+ input: http.Header{"Field1": []string{"asdf", "foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"host": []string{"localhost"}},
+ input: http.Header{},
+ host: "localhost",
+ expect: true,
+ },
+ {
+ match: MatchHeader{"host": []string{"localhost"}},
+ input: http.Header{},
+ host: "caddyserver.com",
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Must-Not-Exist": nil},
+ input: http.Header{},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Must-Not-Exist": nil},
+ input: http.Header{"Must-Not-Exist": []string{"do not match"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}"}},
+ input: http.Header{"Foo": []string{"foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}"}},
+ input: http.Header{"Foo": []string{"asdf"}},
+ expect: false,
+ },
+ {
+ match: MatchHeader{"Foo": []string{"{a}*"}},
+ input: http.Header{"Foo": []string{"foobar-baz"}},
+ expect: true,
+ },
+ } {
+ req := &http.Request{Header: tc.input, Host: tc.host}
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
+ continue
+ }
+ }
+}
+
+func TestQueryMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ scenario string
+ match MatchQuery
+ input string
+ expect bool
+ }{
+ {
+ scenario: "non match against a specific value",
+ match: MatchQuery{"debug": []string{"1"}},
+ input: "/",
+ expect: false,
+ },
+ {
+ scenario: "match against a specific value",
+ match: MatchQuery{"debug": []string{"1"}},
+ input: "/?debug=1",
+ expect: true,
+ },
+ {
+ scenario: "match against a wildcard",
+ match: MatchQuery{"debug": []string{"*"}},
+ input: "/?debug=something",
+ expect: true,
+ },
+ {
+ scenario: "non match against a wildcarded",
+ match: MatchQuery{"debug": []string{"*"}},
+ input: "/?other=something",
+ expect: false,
+ },
+ {
+ scenario: "match against an empty value",
+ match: MatchQuery{"debug": []string{""}},
+ input: "/?debug",
+ expect: true,
+ },
+ {
+ scenario: "non match against an empty value",
+ match: MatchQuery{"debug": []string{""}},
+ input: "/?someparam",
+ expect: false,
+ },
+ {
+ scenario: "empty matcher value should match empty query",
+ match: MatchQuery{},
+ input: "/?",
+ expect: true,
+ },
+ {
+ scenario: "nil matcher value should NOT match a non-empty query",
+ match: MatchQuery{},
+ input: "/?foo=bar",
+ expect: false,
+ },
+ {
+ scenario: "non-nil matcher should NOT match an empty query",
+ match: MatchQuery{"": nil},
+ input: "/?",
+ expect: false,
+ },
+ {
+ scenario: "match against a placeholder value",
+ match: MatchQuery{"debug": []string{"{http.vars.debug}"}},
+ input: "/?debug=1",
+ expect: true,
+ },
+ {
+ scenario: "match against a placeholder key",
+ match: MatchQuery{"{http.vars.key}": []string{"1"}},
+ input: "/?somekey=1",
+ expect: true,
+ },
+ {
+ scenario: "do not match when not all query params are present",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=1",
+ expect: false,
+ },
+ {
+ scenario: "match when all query params are present",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=1&foo=bar",
+ expect: true,
+ },
+ {
+ scenario: "do not match when the value of a query param does not match",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=2&foo=bar",
+ expect: false,
+ },
+ {
+ scenario: "do not match when all the values the query params do not match",
+ match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}},
+ input: "/?debug=2&foo=baz",
+ expect: false,
+ },
+ {
+ scenario: "match against two values for the same key",
+ match: MatchQuery{"debug": []string{"1"}},
+ input: "/?debug=1&debug=2",
+ expect: true,
+ },
+ {
+ scenario: "match against two values for the same key",
+ match: MatchQuery{"debug": []string{"2", "1"}},
+ input: "/?debug=2&debug=1",
+ expect: true,
+ },
+ } {
+
+ u, _ := url.Parse(tc.input)
+
+ req := &http.Request{URL: u}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ repl.Set("http.vars.debug", "1")
+ repl.Set("http.vars.key", "somekey")
+ req = req.WithContext(ctx)
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input)
+ continue
+ }
+ }
+}
+
+func TestHeaderREMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ match MatchHeaderRE
+ input http.Header // make sure these are canonical cased (std lib will do that in a real request)
+ host string
+ expect bool
+ expectRepl map[string]string
+ }{
+ {
+ match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "foo"}},
+ input: http.Header{"Field": []string{"foo"}},
+ expect: true,
+ },
+ {
+ match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "$foo^"}},
+ input: http.Header{"Field": []string{"foobar"}},
+ expect: false,
+ },
+ {
+ match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}},
+ input: http.Header{"Field": []string{"foobar"}},
+ expect: true,
+ expectRepl: map[string]string{"name.1": "bar"},
+ },
+ {
+ match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo.*$", Name: "name"}},
+ input: http.Header{"Field": []string{"barfoo", "foobar"}},
+ expect: true,
+ },
+ {
+ match: MatchHeaderRE{"host": &MatchRegexp{Pattern: "^localhost$", Name: "name"}},
+ input: http.Header{},
+ host: "localhost",
+ expect: true,
+ },
+ {
+ match: MatchHeaderRE{"host": &MatchRegexp{Pattern: "^local$", Name: "name"}},
+ input: http.Header{},
+ host: "localhost",
+ expect: false,
+ },
+ } {
+ // compile the regexp and validate its name
+ err := tc.match.Provision(caddy.Context{})
+ if err != nil {
+ t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err)
+ continue
+ }
+ err = tc.match.Validate()
+ if err != nil {
+ t.Errorf("Test %d %v: Validating: %v", i, tc.match, err)
+ continue
+ }
+
+ // set up the fake request and its Replacer
+ req := &http.Request{Header: tc.input, URL: new(url.URL), Host: tc.host}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
+ i, tc.match, tc.expect, actual, tc.input)
+ continue
+ }
+
+ for key, expectVal := range tc.expectRepl {
+ placeholder := fmt.Sprintf("{http.regexp.%s}", key)
+ actualVal := repl.ReplaceAll(placeholder, "")
+ if actualVal != expectVal {
+ t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'",
+ i, tc.match, key, expectVal, actualVal)
+ continue
+ }
+ }
+ }
+}
+
+func BenchmarkHeaderREMatcher(b *testing.B) {
+ i := 0
+ match := MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}}
+ input := http.Header{"Field": []string{"foobar"}}
+ var host string
+ err := match.Provision(caddy.Context{})
+ if err != nil {
+ b.Errorf("Test %d %v: Provisioning: %v", i, match, err)
+ }
+ err = match.Validate()
+ if err != nil {
+ b.Errorf("Test %d %v: Validating: %v", i, match, err)
+ }
+
+ // set up the fake request and its Replacer
+ req := &http.Request{Header: input, URL: new(url.URL), Host: host}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+ for run := 0; run < b.N; run++ {
+ match.MatchWithError(req)
+ }
+}
+
+func TestVarREMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ desc string
+ match MatchVarsRE
+ input VarsMiddleware
+ expect bool
+ expectRepl map[string]string
+ }{
+ {
+ desc: "match static value within var set by the VarsMiddleware succeeds",
+ match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "foo"}},
+ input: VarsMiddleware{"Var1": "here is foo val"},
+ expect: true,
+ },
+ {
+ desc: "value set by VarsMiddleware not satisfying regexp matcher fails to match",
+ match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "$foo^"}},
+ input: VarsMiddleware{"Var1": "foobar"},
+ expect: false,
+ },
+ {
+ desc: "successfully matched value is captured and its placeholder is added to replacer",
+ match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}},
+ input: VarsMiddleware{"Var1": "foobar"},
+ expect: true,
+ expectRepl: map[string]string{"name.1": "bar"},
+ },
+ {
+ desc: "matching against a value of standard variables succeeds",
+ match: MatchVarsRE{"{http.request.method}": &MatchRegexp{Pattern: "^G.[tT]$"}},
+ input: VarsMiddleware{},
+ expect: true,
+ },
+ {
+ desc: "matching against value of var set by the VarsMiddleware and referenced by its placeholder succeeds",
+ match: MatchVarsRE{"{http.vars.Var1}": &MatchRegexp{Pattern: "[vV]ar[0-9]"}},
+ input: VarsMiddleware{"Var1": "var1Value"},
+ expect: true,
+ },
+ } {
+ i := i // capture range value
+ tc := tc // capture range value
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ // compile the regexp and validate its name
+ err := tc.match.Provision(caddy.Context{})
+ if err != nil {
+ t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err)
+ return
+ }
+ err = tc.match.Validate()
+ if err != nil {
+ t.Errorf("Test %d %v: Validating: %v", i, tc.match, err)
+ return
+ }
+
+ // set up the fake request and its Replacer
+ req := &http.Request{URL: new(url.URL), Method: http.MethodGet}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]any))
+ req = req.WithContext(ctx)
+
+ addHTTPVarsToReplacer(repl, req, httptest.NewRecorder())
+
+ tc.input.ServeHTTP(httptest.NewRecorder(), req, emptyHandler)
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'",
+ i, tc.match, tc.expect, actual, tc.input)
+ return
+ }
+
+ for key, expectVal := range tc.expectRepl {
+ placeholder := fmt.Sprintf("{http.regexp.%s}", key)
+ actualVal := repl.ReplaceAll(placeholder, "")
+ if actualVal != expectVal {
+ t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'",
+ i, tc.match, key, expectVal, actualVal)
+ return
+ }
+ }
+ })
+ }
+}
+
+func TestNotMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ host, path string
+ match MatchNot
+ expect bool
+ }{
+ {
+ host: "example.com", path: "/",
+ match: MatchNot{},
+ expect: true,
+ },
+ {
+ host: "example.com", path: "/foo",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/foo"},
+ },
+ },
+ },
+ expect: false,
+ },
+ {
+ host: "example.com", path: "/bar",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/foo"},
+ },
+ },
+ },
+ expect: true,
+ },
+ {
+ host: "example.com", path: "/bar",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/foo"},
+ },
+ {
+ MatchHost{"example.com"},
+ },
+ },
+ },
+ expect: false,
+ },
+ {
+ host: "example.com", path: "/bar",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/bar"},
+ },
+ {
+ MatchHost{"example.com"},
+ },
+ },
+ },
+ expect: false,
+ },
+ {
+ host: "example.com", path: "/foo",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/bar"},
+ },
+ {
+ MatchHost{"sub.example.com"},
+ },
+ },
+ },
+ expect: true,
+ },
+ {
+ host: "example.com", path: "/foo",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/foo"},
+ MatchHost{"example.com"},
+ },
+ },
+ },
+ expect: false,
+ },
+ {
+ host: "example.com", path: "/foo",
+ match: MatchNot{
+ MatcherSets: []MatcherSet{
+ {
+ MatchPath{"/bar"},
+ MatchHost{"example.com"},
+ },
+ },
+ },
+ expect: true,
+ },
+ } {
+ req := &http.Request{Host: tc.host, URL: &url.URL{Path: tc.path}}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ actual, err := tc.match.MatchWithError(req)
+ if err != nil {
+ t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err)
+ }
+ if actual != tc.expect {
+ t.Errorf("Test %d %+v: Expected %t, got %t for: host=%s path=%s'", i, tc.match, tc.expect, actual, tc.host, tc.path)
+ continue
+ }
+ }
+}
+
+func BenchmarkLargeHostMatcher(b *testing.B) {
+ // this benchmark simulates a large host matcher (thousands of entries) where each
+ // value is an exact hostname (not a placeholder or wildcard) - compare the results
+ // of this with and without the binary search (comment out the various fast path
+ // sections in Match) to conduct experiments
+
+ const n = 10000
+ lastHost := fmt.Sprintf("%d.example.com", n-1)
+ req := &http.Request{Host: lastHost}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ matcher := make(MatchHost, n)
+ for i := 0; i < n; i++ {
+ matcher[i] = fmt.Sprintf("%d.example.com", i)
+ }
+ err := matcher.Provision(caddy.Context{})
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ matcher.MatchWithError(req)
+ }
+}
+
+func BenchmarkHostMatcherWithoutPlaceholder(b *testing.B) {
+ req := &http.Request{Host: "localhost"}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+
+ match := MatchHost{"localhost"}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ match.MatchWithError(req)
+ }
+}
+
+func BenchmarkHostMatcherWithPlaceholder(b *testing.B) {
+ err := os.Setenv("GO_BENCHMARK_DOMAIN", "localhost")
+ if err != nil {
+ b.Errorf("error while setting up environment: %v", err)
+ }
+
+ req := &http.Request{Host: "localhost"}
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ req = req.WithContext(ctx)
+ match := MatchHost{"{env.GO_BENCHMARK_DOMAIN}"}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ match.MatchWithError(req)
+ }
+}
diff --git a/modules/caddyhttp/metrics.go b/modules/caddyhttp/metrics.go
new file mode 100644
index 00000000000..9bb97e0b47b
--- /dev/null
+++ b/modules/caddyhttp/metrics.go
@@ -0,0 +1,214 @@
+package caddyhttp
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/internal/metrics"
+)
+
+// Metrics configures metrics observations.
+// EXPERIMENTAL and subject to change or removal.
+type Metrics struct {
+	// Enable per-host metrics. Enabling this option may
+	// incur high-memory consumption, depending on the number of hosts
+	// managed by Caddy.
+	PerHost bool `json:"per_host,omitempty"`
+
+	// init guards one-time creation of the metric collectors
+	// (see newMetricsInstrumentedHandler)
+	init sync.Once
+	// httpMetrics holds the registered Prometheus collectors.
+	// NOTE(review): the json tag is a no-op — unexported fields are never
+	// (un)marshaled by encoding/json.
+	httpMetrics *httpMetrics `json:"-"`
+}
+
+// httpMetrics bundles the Prometheus collectors created by initHTTPMetrics
+// and updated by metricsInstrumentedHandler.ServeHTTP.
+type httpMetrics struct {
+	requestInFlight  *prometheus.GaugeVec
+	requestCount     *prometheus.CounterVec
+	requestErrors    *prometheus.CounterVec
+	requestDuration  *prometheus.HistogramVec
+	requestSize      *prometheus.HistogramVec
+	responseSize     *prometheus.HistogramVec
+	responseDuration *prometheus.HistogramVec
+}
+
+// initHTTPMetrics creates and registers all HTTP metric collectors on the
+// registry held by ctx. It must run exactly once per Metrics value (callers
+// guard it with Metrics.init), because promauto panics if registration of a
+// collector fails, e.g. on a duplicate name.
+func initHTTPMetrics(ctx caddy.Context, metrics *Metrics) {
+	const ns, sub = "caddy", "http"
+	registry := ctx.GetMetricsRegistry()
+	// labels common to all collectors; "host" is only added when per-host
+	// metrics are enabled, since it can greatly increase cardinality
+	basicLabels := []string{"server", "handler"}
+	if metrics.PerHost {
+		basicLabels = append(basicLabels, "host")
+	}
+	metrics.httpMetrics.requestInFlight = promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "requests_in_flight",
+		Help:      "Number of requests currently handled by this server.",
+	}, basicLabels)
+	metrics.httpMetrics.requestErrors = promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "request_errors_total",
+		Help:      "Number of requests resulting in middleware errors.",
+	}, basicLabels)
+	metrics.httpMetrics.requestCount = promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "requests_total",
+		Help:      "Counter of HTTP(S) requests made.",
+	}, basicLabels)
+
+	// TODO: allow these to be customized in the config
+	durationBuckets := prometheus.DefBuckets
+	sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8)
+
+	// the histograms additionally carry the response code and request method
+	httpLabels := []string{"server", "handler", "code", "method"}
+	if metrics.PerHost {
+		httpLabels = append(httpLabels, "host")
+	}
+	metrics.httpMetrics.requestDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "request_duration_seconds",
+		Help:      "Histogram of round-trip request durations.",
+		Buckets:   durationBuckets,
+	}, httpLabels)
+	metrics.httpMetrics.requestSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "request_size_bytes",
+		Help:      "Total size of the request. Includes body",
+		Buckets:   sizeBuckets,
+	}, httpLabels)
+	metrics.httpMetrics.responseSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "response_size_bytes",
+		Help:      "Size of the returned response.",
+		Buckets:   sizeBuckets,
+	}, httpLabels)
+	metrics.httpMetrics.responseDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: ns,
+		Subsystem: sub,
+		Name:      "response_duration_seconds",
+		Help:      "Histogram of times to first byte in response bodies.",
+		Buckets:   durationBuckets,
+	}, httpLabels)
+}
+
+// serverNameFromContext extracts the current server name from the context.
+// Returns "UNKNOWN" if none is available (should probably never happen).
+func serverNameFromContext(ctx context.Context) string {
+	if srv, ok := ctx.Value(ServerCtxKey).(*Server); ok && srv != nil && srv.name != "" {
+		return srv.name
+	}
+	return "UNKNOWN"
+}
+
+// metricsInstrumentedHandler wraps a MiddlewareHandler and records Prometheus
+// metrics (in-flight gauge, request/error counters, size and duration
+// histograms) around every request it serves.
+type metricsInstrumentedHandler struct {
+	handler string            // value used for the "handler" metric label
+	mh      MiddlewareHandler // the wrapped handler being instrumented
+	metrics *Metrics          // shared configuration and collectors
+}
+
+// newMetricsInstrumentedHandler lazily registers the shared HTTP collectors
+// (first call only, guarded by metrics.init) and returns a wrapper that
+// instruments mh under the given handler label.
+func newMetricsInstrumentedHandler(ctx caddy.Context, handler string, mh MiddlewareHandler, metrics *Metrics) *metricsInstrumentedHandler {
+	metrics.init.Do(func() { initHTTPMetrics(ctx, metrics) })
+
+	return &metricsInstrumentedHandler{
+		handler: handler,
+		mh:      mh,
+		metrics: metrics,
+	}
+}
+
+// ServeHTTP implements MiddlewareHandler. It observes in-flight, count,
+// error, size, and duration metrics around the wrapped handler's execution.
+func (h *metricsInstrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
+	server := serverNameFromContext(r.Context())
+	labels := prometheus.Labels{"server": server, "handler": h.handler}
+	method := metrics.SanitizeMethod(r.Method)
+	// the "code" value is set later, but initialized here to eliminate the possibility
+	// of a panic
+	statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""}
+
+	if h.metrics.PerHost {
+		labels["host"] = strings.ToLower(r.Host)
+		statusLabels["host"] = strings.ToLower(r.Host)
+	}
+
+	inFlight := h.metrics.httpMetrics.requestInFlight.With(labels)
+	inFlight.Inc()
+	defer inFlight.Dec()
+
+	start := time.Now()
+
+	// This is a _bit_ of a hack - it depends on the ShouldBufferFunc always
+	// being called when the headers are written.
+	// Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader.
+	writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool {
+		statusLabels["code"] = metrics.SanitizeCode(status)
+		ttfb := time.Since(start).Seconds()
+		h.metrics.httpMetrics.responseDuration.With(statusLabels).Observe(ttfb)
+		// returning false means the response is not buffered; this callback
+		// is only piggybacked on to observe time-to-first-byte
+		return false
+	})
+	wrec := NewResponseRecorder(w, nil, writeHeaderRecorder)
+	err := h.mh.ServeHTTP(wrec, r, next)
+	dur := time.Since(start).Seconds()
+	h.metrics.httpMetrics.requestCount.With(labels).Inc()
+
+	// observeRequest records the size/duration histograms once the final
+	// status is known; it reads statusLabels, which may have been filled in
+	// by writeHeaderRecorder above
+	observeRequest := func(status int) {
+		// If the code hasn't been set yet, and we didn't encounter an error, we're
+		// probably falling through with an empty handler.
+		if statusLabels["code"] == "" {
+			// we still sanitize it, even though it's likely to be 0. A 200 is
+			// returned on fallthrough so we want to reflect that.
+			statusLabels["code"] = metrics.SanitizeCode(status)
+		}
+
+		h.metrics.httpMetrics.requestDuration.With(statusLabels).Observe(dur)
+		h.metrics.httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r)))
+		h.metrics.httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size()))
+	}
+
+	if err != nil {
+		var handlerErr HandlerError
+		if errors.As(err, &handlerErr) {
+			// a structured HandlerError carries a status code worth recording
+			observeRequest(handlerErr.StatusCode)
+		}
+
+		h.metrics.httpMetrics.requestErrors.With(labels).Inc()
+
+		return err
+	}
+
+	observeRequest(wrec.Status())
+
+	return nil
+}
+
+// computeApproximateRequestSize estimates the wire size of a request in bytes
+// by summing the URL, method, protocol, header names and values, host, and
+// the declared body length. The result is approximate (delimiters and the
+// request line framing are not counted).
+// taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	// ContentLength is -1 when the length is unknown; skip it in that case
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
diff --git a/modules/caddyhttp/metrics_test.go b/modules/caddyhttp/metrics_test.go
new file mode 100644
index 00000000000..4a0519b8769
--- /dev/null
+++ b/modules/caddyhttp/metrics_test.go
@@ -0,0 +1,385 @@
+package caddyhttp
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+)
+
+// TestServerNameFromContext covers both the fallback ("UNKNOWN") and the
+// happy path of serverNameFromContext.
+func TestServerNameFromContext(t *testing.T) {
+	base := context.Background()
+	if got := serverNameFromContext(base); got != "UNKNOWN" {
+		t.Errorf("Not equal: expected %q, but got %q", "UNKNOWN", got)
+	}
+
+	const name = "foo"
+	withServer := context.WithValue(base, ServerCtxKey, &Server{name: name})
+	if got := serverNameFromContext(withServer); got != name {
+		t.Errorf("Not equal: expected %q, but got %q", name, got)
+	}
+}
+
+// TestMetricsInstrumentedHandler exercises the instrumented wrapper end to end
+// (without per-host labels): a handler error, a successful response, an empty
+// fall-through handler, and a handler returning an HTTP-status error; it then
+// compares the gathered metrics against an expected text-format dump.
+func TestMetricsInstrumentedHandler(t *testing.T) {
+	ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()})
+	metrics := &Metrics{
+		init:        sync.Once{},
+		httpMetrics: &httpMetrics{},
+	}
+	handlerErr := errors.New("oh noes")
+	response := []byte("hello world!")
+	// the innermost handler checks the in-flight gauge while the request is
+	// being served, and fails (or succeeds) based on handlerErr
+	h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+		if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 {
+			t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual)
+		}
+		if handlerErr == nil {
+			w.Write(response)
+		}
+		return handlerErr
+	})
+
+	mh := middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return h.ServeHTTP(w, r)
+	})
+
+	ih := newMetricsInstrumentedHandler(ctx, "bar", mh, metrics)
+
+	r := httptest.NewRequest("GET", "/", nil)
+	w := httptest.NewRecorder()
+
+	if actual := ih.ServeHTTP(w, r, h); actual != handlerErr {
+		t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual)
+	}
+	if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 {
+		t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual)
+	}
+
+	handlerErr = nil
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+
+	// an empty handler - no errors, no header written
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return nil
+	})
+	ih = newMetricsInstrumentedHandler(ctx, "empty", mh, metrics)
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+	if actual := w.Result().StatusCode; actual != 200 {
+		t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual)
+	}
+	if actual := w.Result().Header; len(actual) != 0 {
+		t.Errorf("Not empty: expected headers to be empty, but got %#v", actual)
+	}
+
+	// handler returning an error with an HTTP status
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return Error(http.StatusTooManyRequests, nil)
+	})
+
+	ih = newMetricsInstrumentedHandler(ctx, "foo", mh, metrics)
+
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, nil); err == nil {
+		t.Errorf("expected error to be propagated")
+	}
+
+	// expected Prometheus text-format output for the three handlers above
+	expected := `
+	# HELP caddy_http_request_duration_seconds Histogram of round-trip request durations.
+	# TYPE caddy_http_request_duration_seconds histogram
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.005"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.01"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.025"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.05"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.25"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="2.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="10"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_duration_seconds_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_size_bytes Total size of the request. Includes body
+	# TYPE caddy_http_request_size_bytes histogram
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_response_size_bytes Size of the returned response.
+	# TYPE caddy_http_response_size_bytes histogram
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 12
+	caddy_http_response_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_errors_total Number of requests resulting in middleware errors.
+	# TYPE caddy_http_request_errors_total counter
+	caddy_http_request_errors_total{handler="bar",server="UNKNOWN"} 1
+	caddy_http_request_errors_total{handler="foo",server="UNKNOWN"} 1
+	`
+	if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected),
+		"caddy_http_request_size_bytes",
+		"caddy_http_response_size_bytes",
+		// caddy_http_request_duration_seconds_sum will vary based on how long the test took to run,
+		// so we check just the _bucket and _count metrics
+		"caddy_http_request_duration_seconds_bucket",
+		"caddy_http_request_duration_seconds_count",
+		"caddy_http_request_errors_total",
+	); err != nil {
+		t.Errorf("received unexpected error: %s", err)
+	}
+}
+
+// TestMetricsInstrumentedHandlerPerHost repeats the instrumented-handler
+// scenarios with PerHost enabled, so every metric additionally carries a
+// "host" label (httptest requests default to host "example.com").
+func TestMetricsInstrumentedHandlerPerHost(t *testing.T) {
+	ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()})
+	metrics := &Metrics{
+		PerHost:     true,
+		init:        sync.Once{},
+		httpMetrics: &httpMetrics{},
+	}
+	handlerErr := errors.New("oh noes")
+	response := []byte("hello world!")
+	// the innermost handler checks the in-flight gauge while the request is
+	// being served, and fails (or succeeds) based on handlerErr
+	h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+		if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 {
+			t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual)
+		}
+		if handlerErr == nil {
+			w.Write(response)
+		}
+		return handlerErr
+	})
+
+	mh := middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return h.ServeHTTP(w, r)
+	})
+
+	ih := newMetricsInstrumentedHandler(ctx, "bar", mh, metrics)
+
+	r := httptest.NewRequest("GET", "/", nil)
+	w := httptest.NewRecorder()
+
+	if actual := ih.ServeHTTP(w, r, h); actual != handlerErr {
+		t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual)
+	}
+	if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 {
+		t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual)
+	}
+
+	handlerErr = nil
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+
+	// an empty handler - no errors, no header written
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return nil
+	})
+	ih = newMetricsInstrumentedHandler(ctx, "empty", mh, metrics)
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, h); err != nil {
+		t.Errorf("Received unexpected error: %v", err)
+	}
+	if actual := w.Result().StatusCode; actual != 200 {
+		t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual)
+	}
+	if actual := w.Result().Header; len(actual) != 0 {
+		t.Errorf("Not empty: expected headers to be empty, but got %#v", actual)
+	}
+
+	// handler returning an error with an HTTP status
+	mh = middlewareHandlerFunc(func(w http.ResponseWriter, r *http.Request, h Handler) error {
+		return Error(http.StatusTooManyRequests, nil)
+	})
+
+	ih = newMetricsInstrumentedHandler(ctx, "foo", mh, metrics)
+
+	r = httptest.NewRequest("GET", "/", nil)
+	w = httptest.NewRecorder()
+
+	if err := ih.ServeHTTP(w, r, nil); err == nil {
+		t.Errorf("expected error to be propagated")
+	}
+
+	// expected Prometheus text-format output, now including the host label
+	expected := `
+	# HELP caddy_http_request_duration_seconds Histogram of round-trip request durations.
+	# TYPE caddy_http_request_duration_seconds histogram
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.005"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.01"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.025"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.05"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.25"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="2.5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="5"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="10"} 1
+	caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_duration_seconds_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_size_bytes Total size of the request. Includes body
+	# TYPE caddy_http_request_size_bytes histogram
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_request_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 23
+	caddy_http_request_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_response_size_bytes Size of the returned response.
+	# TYPE caddy_http_response_size_bytes histogram
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 12
+	caddy_http_response_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1
+	caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1
+	caddy_http_response_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 0
+	caddy_http_response_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1
+	# HELP caddy_http_request_errors_total Number of requests resulting in middleware errors.
+	# TYPE caddy_http_request_errors_total counter
+	caddy_http_request_errors_total{handler="bar",host="example.com",server="UNKNOWN"} 1
+	caddy_http_request_errors_total{handler="foo",host="example.com",server="UNKNOWN"} 1
+	`
+	if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected),
+		"caddy_http_request_size_bytes",
+		"caddy_http_response_size_bytes",
+		// caddy_http_request_duration_seconds_sum will vary based on how long the test took to run,
+		// so we check just the _bucket and _count metrics
+		"caddy_http_request_duration_seconds_bucket",
+		"caddy_http_request_duration_seconds_count",
+		"caddy_http_request_errors_total",
+	); err != nil {
+		t.Errorf("received unexpected error: %s", err)
+	}
+}
+
+// middlewareHandlerFunc adapts a plain function to the MiddlewareHandler
+// interface, in the same spirit as http.HandlerFunc.
+type middlewareHandlerFunc func(http.ResponseWriter, *http.Request, Handler) error
+
+// ServeHTTP implements MiddlewareHandler by invoking f itself.
+func (f middlewareHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request, h Handler) error {
+	return f(w, r, h)
+}
diff --git a/modules/caddyhttp/proxyprotocol/listenerwrapper.go b/modules/caddyhttp/proxyprotocol/listenerwrapper.go
new file mode 100644
index 00000000000..f1d170c38ca
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/listenerwrapper.go
@@ -0,0 +1,144 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxyprotocol
+
+import (
+ "net"
+ "net/netip"
+ "time"
+
+ goproxy "github.com/pires/go-proxyproto"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// ListenerWrapper provides PROXY protocol support to Caddy by implementing
+// the caddy.ListenerWrapper interface. If a connection is received via Unix
+// socket, it's trusted. Otherwise, it's checked against the Allow/Deny lists,
+// then it's handled by the FallbackPolicy.
+//
+// It must be loaded before the `tls` listener because the PROXY protocol
+// encapsulates the TLS data.
+//
+// Credit goes to https://github.com/mastercactapus/caddy2-proxyprotocol for having
+// initially implemented this as a plugin.
+type ListenerWrapper struct {
+ // Timeout specifies an optional maximum time for
+ // the PROXY header to be received.
+ // If zero, timeout is disabled. Default is 5s.
+ Timeout caddy.Duration `json:"timeout,omitempty"`
+
+ // Allow is an optional list of CIDR ranges to
+ // allow/require PROXY headers from.
+ Allow []string `json:"allow,omitempty"`
+ allow []netip.Prefix
+
+ // Deny is an optional list of CIDR ranges to
+ // deny PROXY headers from.
+ Deny []string `json:"deny,omitempty"`
+ deny []netip.Prefix
+
+ // FallbackPolicy specifies the policy to use if the downstream
+ // IP address is not in the Allow list nor is in the Deny list.
+ //
+ // NOTE: The generated docs which describe the value of this
+ // field is wrong because of how this type unmarshals JSON in a
+ // custom way. The field expects a string, not a number.
+ //
+ // Accepted values are: IGNORE, USE, REJECT, REQUIRE, SKIP
+ //
+ // - IGNORE: address from PROXY header, but accept connection
+ //
+ // - USE: address from PROXY header
+ //
+ // - REJECT: connection when PROXY header is sent
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is present, subsequent reads do not. It is the task of
+ // the code using the connection to handle that case properly.
+ //
+ // - REQUIRE: connection to send PROXY header, reject if not present
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is not present, subsequent reads do not. It is the task
+ // of the code using the connection to handle that case properly.
+ //
+ // - SKIP: accepts a connection without requiring the PROXY header.
+ // Note: an example usage can be found in the SkipProxyHeaderForCIDR
+ // function.
+ //
+ // Default: IGNORE
+ //
+ // Policy definitions are here: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
+ FallbackPolicy Policy `json:"fallback_policy,omitempty"`
+
+ policy goproxy.ConnPolicyFunc
+}
+
+// Provision sets up the listener wrapper.
+func (pp *ListenerWrapper) Provision(ctx caddy.Context) error {
+ for _, cidr := range pp.Allow {
+ ipnet, err := netip.ParsePrefix(cidr)
+ if err != nil {
+ return err
+ }
+ pp.allow = append(pp.allow, ipnet)
+ }
+ for _, cidr := range pp.Deny {
+ ipnet, err := netip.ParsePrefix(cidr)
+ if err != nil {
+ return err
+ }
+ pp.deny = append(pp.deny, ipnet)
+ }
+
+ pp.policy = func(options goproxy.ConnPolicyOptions) (goproxy.Policy, error) {
+ // trust unix sockets
+ if network := options.Upstream.Network(); caddy.IsUnixNetwork(network) || caddy.IsFdNetwork(network) {
+ return goproxy.USE, nil
+ }
+ ret := pp.FallbackPolicy
+ host, _, err := net.SplitHostPort(options.Upstream.String())
+ if err != nil {
+ return goproxy.REJECT, err
+ }
+
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return goproxy.REJECT, err
+ }
+ for _, ipnet := range pp.deny {
+ if ipnet.Contains(ip) {
+ return goproxy.REJECT, nil
+ }
+ }
+ for _, ipnet := range pp.allow {
+ if ipnet.Contains(ip) {
+ ret = PolicyUSE
+ break
+ }
+ }
+ return policyToGoProxyPolicy[ret], nil
+ }
+ return nil
+}
+
+// WrapListener adds PROXY protocol support to the listener.
+func (pp *ListenerWrapper) WrapListener(l net.Listener) net.Listener {
+ pl := &goproxy.Listener{
+ Listener: l,
+ ReadHeaderTimeout: time.Duration(pp.Timeout),
+ }
+ pl.ConnPolicy = pp.policy
+ return pl
+}
diff --git a/modules/caddyhttp/proxyprotocol/module.go b/modules/caddyhttp/proxyprotocol/module.go
new file mode 100644
index 00000000000..75a156a2071
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/module.go
@@ -0,0 +1,87 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxyprotocol
+
+import (
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
+func init() {
+ caddy.RegisterModule(ListenerWrapper{})
+}
+
+func (ListenerWrapper) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "caddy.listeners.proxy_protocol",
+ New: func() caddy.Module { return new(ListenerWrapper) },
+ }
+}
+
+// UnmarshalCaddyfile sets up the listener Listenerwrapper from Caddyfile tokens. Syntax:
+//
+//	proxy_protocol {
+//		timeout <duration>
+//		allow <cidrs...>
+//		deny <cidrs...>
+//		fallback_policy <policy>
+//	}
+func (w *ListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ d.Next() // consume wrapper name
+
+ // No same-line options are supported
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+
+ for d.NextBlock(0) {
+ switch d.Val() {
+ case "timeout":
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ dur, err := caddy.ParseDuration(d.Val())
+ if err != nil {
+ return d.Errf("parsing proxy_protocol timeout duration: %v", err)
+ }
+ w.Timeout = caddy.Duration(dur)
+
+ case "allow":
+ w.Allow = append(w.Allow, d.RemainingArgs()...)
+ case "deny":
+ w.Deny = append(w.Deny, d.RemainingArgs()...)
+ case "fallback_policy":
+ if !d.NextArg() {
+ return d.ArgErr()
+ }
+ p, err := parsePolicy(d.Val())
+ if err != nil {
+ return d.WrapErr(err)
+ }
+ w.FallbackPolicy = p
+ default:
+ return d.ArgErr()
+ }
+ }
+ return nil
+}
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*ListenerWrapper)(nil)
+ _ caddy.Module = (*ListenerWrapper)(nil)
+ _ caddy.ListenerWrapper = (*ListenerWrapper)(nil)
+ _ caddyfile.Unmarshaler = (*ListenerWrapper)(nil)
+)
diff --git a/modules/caddyhttp/proxyprotocol/policy.go b/modules/caddyhttp/proxyprotocol/policy.go
new file mode 100644
index 00000000000..6dc8beb45dc
--- /dev/null
+++ b/modules/caddyhttp/proxyprotocol/policy.go
@@ -0,0 +1,82 @@
+package proxyprotocol
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ goproxy "github.com/pires/go-proxyproto"
+)
+
+type Policy int
+
+// as defined in: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
+const (
+ // IGNORE address from PROXY header, but accept connection
+ PolicyIGNORE Policy = iota
+ // USE address from PROXY header
+ PolicyUSE
+ // REJECT connection when PROXY header is sent
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is present, subsequent reads do not. It is the task of
+ // the code using the connection to handle that case properly.
+ PolicyREJECT
+ // REQUIRE connection to send PROXY header, reject if not present
+ // Note: even though the first read on the connection returns an error if
+ // a PROXY header is not present, subsequent reads do not. It is the task
+ // of the code using the connection to handle that case properly.
+ PolicyREQUIRE
+ // SKIP accepts a connection without requiring the PROXY header
+ // Note: an example usage can be found in the SkipProxyHeaderForCIDR
+ // function.
+ PolicySKIP
+)
+
+var policyToGoProxyPolicy = map[Policy]goproxy.Policy{
+ PolicyUSE: goproxy.USE,
+ PolicyIGNORE: goproxy.IGNORE,
+ PolicyREJECT: goproxy.REJECT,
+ PolicyREQUIRE: goproxy.REQUIRE,
+ PolicySKIP: goproxy.SKIP,
+}
+
+var policyMap = map[Policy]string{
+ PolicyUSE: "USE",
+ PolicyIGNORE: "IGNORE",
+ PolicyREJECT: "REJECT",
+ PolicyREQUIRE: "REQUIRE",
+ PolicySKIP: "SKIP",
+}
+
+var policyMapRev = map[string]Policy{
+ "USE": PolicyUSE,
+ "IGNORE": PolicyIGNORE,
+ "REJECT": PolicyREJECT,
+ "REQUIRE": PolicyREQUIRE,
+ "SKIP": PolicySKIP,
+}
+
+// MarshalText implements the text marshaller method.
+func (x Policy) MarshalText() ([]byte, error) {
+ return []byte(policyMap[x]), nil
+}
+
+// UnmarshalText implements the text unmarshaller method.
+func (x *Policy) UnmarshalText(text []byte) error {
+ name := string(text)
+ tmp, err := parsePolicy(name)
+ if err != nil {
+ return err
+ }
+ *x = tmp
+ return nil
+}
+
+func parsePolicy(name string) (Policy, error) {
+ if x, ok := policyMapRev[strings.ToUpper(name)]; ok {
+ return x, nil
+ }
+ return Policy(0), fmt.Errorf("%s is %w", name, errInvalidPolicy)
+}
+
+var errInvalidPolicy = errors.New("invalid policy")
diff --git a/modules/caddyhttp/push/caddyfile.go b/modules/caddyhttp/push/caddyfile.go
new file mode 100644
index 00000000000..f56db81f98f
--- /dev/null
+++ b/modules/caddyhttp/push/caddyfile.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push
+
+import (
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
+)
+
+func init() {
+ httpcaddyfile.RegisterHandlerDirective("push", parseCaddyfile)
+}
+
+// parseCaddyfile sets up the push handler. Syntax:
+//
+//	push [<matcher>] [<resource>] {
+//	    [GET|HEAD] <resource>
+//	    headers {
+//	        [+]<field> [<value|regexp> [<replacement>]]
+//	        -<field>
+//	    }
+//	}
+//
+// A single resource can be specified inline without opening a
+// block for the most common/simple case. Or, a block can be
+// opened and multiple resources can be specified, one per
+// line, optionally preceded by the method. The headers
+// subdirective can be used to customize the headers that
+// are set on each (synthetic) push request, using the same
+// syntax as the 'header' directive for request headers.
+// Placeholders are accepted in resource and header field
+// name and value and replacement tokens.
+func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+
+ handler := new(Handler)
+
+ // inline resources
+ if h.NextArg() {
+ handler.Resources = append(handler.Resources, Resource{Target: h.Val()})
+ }
+
+ // optional block
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "headers":
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ for nesting := h.Nesting(); h.NextBlock(nesting); {
+ var err error
+
+ // include current token, which we treat as an argument here
+ args := []string{h.Val()}
+ args = append(args, h.RemainingArgs()...)
+
+ if handler.Headers == nil {
+ handler.Headers = new(HeaderConfig)
+ }
+
+ switch len(args) {
+ case 1:
+ err = headers.CaddyfileHeaderOp(&handler.Headers.HeaderOps, args[0], "", nil)
+ case 2:
+ err = headers.CaddyfileHeaderOp(&handler.Headers.HeaderOps, args[0], args[1], nil)
+ case 3:
+ err = headers.CaddyfileHeaderOp(&handler.Headers.HeaderOps, args[0], args[1], &args[2])
+ default:
+ return nil, h.ArgErr()
+ }
+
+ if err != nil {
+ return nil, h.Err(err.Error())
+ }
+ }
+
+ case "GET", "HEAD":
+ method := h.Val()
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ target := h.Val()
+ handler.Resources = append(handler.Resources, Resource{
+ Method: method,
+ Target: target,
+ })
+
+ default:
+ handler.Resources = append(handler.Resources, Resource{Target: h.Val()})
+ }
+ }
+ return handler, nil
+}
diff --git a/modules/caddyhttp/push/handler.go b/modules/caddyhttp/push/handler.go
new file mode 100644
index 00000000000..1fbe53d8366
--- /dev/null
+++ b/modules/caddyhttp/push/handler.go
@@ -0,0 +1,263 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
+)
+
+func init() {
+ caddy.RegisterModule(Handler{})
+}
+
+// Handler is a middleware for HTTP/2 server push. Note that
+// HTTP/2 server push has been deprecated by some clients and
+// its use is discouraged unless you can accurately predict
+// which resources actually need to be pushed to the client;
+// it can be difficult to know what the client already has
+// cached. Pushing unnecessary resources results in worse
+// performance. Consider using HTTP 103 Early Hints instead.
+//
+// This handler supports pushing from Link headers; in other
+// words, if the eventual response has Link headers, this
+// handler will push the resources indicated by those headers,
+// even without specifying any resources in its config.
+type Handler struct {
+ // The resources to push.
+ Resources []Resource `json:"resources,omitempty"`
+
+ // Headers to modify for the push requests.
+ Headers *HeaderConfig `json:"headers,omitempty"`
+
+ logger *zap.Logger
+}
+
+// CaddyModule returns the Caddy module information.
+func (Handler) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.push",
+ New: func() caddy.Module { return new(Handler) },
+ }
+}
+
+// Provision sets up h.
+func (h *Handler) Provision(ctx caddy.Context) error {
+ h.logger = ctx.Logger()
+ if h.Headers != nil {
+ err := h.Headers.Provision(ctx)
+ if err != nil {
+ return fmt.Errorf("provisioning header operations: %v", err)
+ }
+ }
+ return nil
+}
+
+func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
+ pusher, ok := w.(http.Pusher)
+ if !ok {
+ return next.ServeHTTP(w, r)
+ }
+
+ // short-circuit recursive pushes
+ if _, ok := r.Header[pushHeader]; ok {
+ return next.ServeHTTP(w, r)
+ }
+
+ repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
+ server := r.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)
+ shouldLogCredentials := server.Logs != nil && server.Logs.ShouldLogCredentials
+
+ // create header for push requests
+ hdr := h.initializePushHeaders(r, repl)
+
+ // push first!
+ for _, resource := range h.Resources {
+ if c := h.logger.Check(zapcore.DebugLevel, "pushing resource"); c != nil {
+ c.Write(
+ zap.String("uri", r.RequestURI),
+ zap.String("push_method", resource.Method),
+ zap.String("push_target", resource.Target),
+ zap.Object("push_headers", caddyhttp.LoggableHTTPHeader{
+ Header: hdr,
+ ShouldLogCredentials: shouldLogCredentials,
+ }),
+ )
+ }
+ err := pusher.Push(repl.ReplaceAll(resource.Target, "."), &http.PushOptions{
+ Method: resource.Method,
+ Header: hdr,
+ })
+ if err != nil {
+ // usually this means either that push is not
+ // supported or concurrent streams are full
+ break
+ }
+ }
+
+ // wrap the response writer so that we can initiate push of any resources
+ // described in Link header fields before the response is written
+ lp := linkPusher{
+ ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w},
+ handler: h,
+ pusher: pusher,
+ header: hdr,
+ request: r,
+ }
+
+ // serve only after pushing!
+ if err := next.ServeHTTP(lp, r); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h Handler) initializePushHeaders(r *http.Request, repl *caddy.Replacer) http.Header {
+ hdr := make(http.Header)
+
+ // prevent recursive pushes
+ hdr.Set(pushHeader, "1")
+
+ // set initial header fields; since exactly how headers should
+ // be implemented for server push is not well-understood, we
+ // are being conservative for now like httpd is:
+ // https://httpd.apache.org/docs/2.4/en/howto/http2.html#push
+ // we only copy some well-known, safe headers that are likely
+ // crucial when requesting certain kinds of content
+ for _, fieldName := range safeHeaders {
+ if vals, ok := r.Header[fieldName]; ok {
+ hdr[fieldName] = vals
+ }
+ }
+
+ // user can customize the push request headers
+ if h.Headers != nil {
+ h.Headers.ApplyTo(hdr, repl)
+ }
+
+ return hdr
+}
+
+// servePreloadLinks parses Link headers from upstream and pushes
+// resources described by them. If a resource has the "nopush"
+// attribute or describes an external entity (meaning, the resource
+// URI includes a scheme), it will not be pushed.
+func (h Handler) servePreloadLinks(pusher http.Pusher, hdr http.Header, resources []string) {
+ for _, resource := range resources {
+ for _, resource := range parseLinkHeader(resource) {
+ if _, ok := resource.params["nopush"]; ok {
+ continue
+ }
+ if isRemoteResource(resource.uri) {
+ continue
+ }
+ err := pusher.Push(resource.uri, &http.PushOptions{
+ Header: hdr,
+ })
+ if err != nil {
+ return
+ }
+ }
+ }
+}
+
+// Resource represents a request for a resource to push.
+type Resource struct {
+ // Method is the request method, which must be GET or HEAD.
+ // Default is GET.
+ Method string `json:"method,omitempty"`
+
+ // Target is the path to the resource being pushed.
+ Target string `json:"target,omitempty"`
+}
+
+// HeaderConfig configures headers for synthetic push requests.
+type HeaderConfig struct {
+ headers.HeaderOps
+}
+
+// linkPusher is a http.ResponseWriter that intercepts
+// the WriteHeader() call to ensure that any resources
+// described by Link response headers get pushed before
+// the response is allowed to be written.
+type linkPusher struct {
+ *caddyhttp.ResponseWriterWrapper
+ handler Handler
+ pusher http.Pusher
+ header http.Header
+ request *http.Request
+}
+
+func (lp linkPusher) WriteHeader(statusCode int) {
+ if links, ok := lp.ResponseWriter.Header()["Link"]; ok {
+ // only initiate these pushes if it hasn't been done yet
+ if val := caddyhttp.GetVar(lp.request.Context(), pushedLink); val == nil {
+ if c := lp.handler.logger.Check(zapcore.DebugLevel, "pushing Link resources"); c != nil {
+ c.Write(zap.Strings("linked", links))
+ }
+ caddyhttp.SetVar(lp.request.Context(), pushedLink, true)
+ lp.handler.servePreloadLinks(lp.pusher, lp.header, links)
+ }
+ }
+ lp.ResponseWriter.WriteHeader(statusCode)
+}
+
+// isRemoteResource returns true if resource starts with
+// a scheme or is a protocol-relative URI.
+func isRemoteResource(resource string) bool {
+ return strings.HasPrefix(resource, "//") ||
+ strings.HasPrefix(resource, "http://") ||
+ strings.HasPrefix(resource, "https://")
+}
+
+// safeHeaders is a list of header fields that are
+// safe to copy to push requests implicitly. It is
+// assumed that requests for certain kinds of content
+// would fail without these fields present.
+var safeHeaders = []string{
+ "Accept-Encoding",
+ "Accept-Language",
+ "Accept",
+ "Cache-Control",
+ "User-Agent",
+}
+
+// pushHeader is a header field that gets added to push requests
+// in order to avoid recursive/infinite pushes.
+const pushHeader = "Caddy-Push"
+
+// pushedLink is the key for the variable on the request
+// context that we use to remember whether we have already
+// pushed resources from Link headers yet; otherwise, if
+// multiple push handlers are invoked, it would repeat the
+// pushing of Link headers.
+const pushedLink = "http.handlers.push.pushed_link"
+
+// Interface guards
+var (
+ _ caddy.Provisioner = (*Handler)(nil)
+ _ caddyhttp.MiddlewareHandler = (*Handler)(nil)
+ _ http.ResponseWriter = (*linkPusher)(nil)
+ _ http.Pusher = (*linkPusher)(nil)
+)
diff --git a/modules/caddyhttp/push/link.go b/modules/caddyhttp/push/link.go
new file mode 100644
index 00000000000..855dffd0509
--- /dev/null
+++ b/modules/caddyhttp/push/link.go
@@ -0,0 +1,77 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push
+
+import (
+ "strings"
+)
+
+// linkResource contains the results of a parsed Link header.
+type linkResource struct {
+ uri string
+ params map[string]string
+}
+
+// parseLinkHeader is responsible for parsing Link header
+// and returning list of found resources.
+//
+// Accepted formats are:
+//
+//	Link: <resource>; as=script
+//	Link: <resource>; as=script,<resource2>; as=style
+//	Link: <resource>;
+//
+// where <resource> begins with a forward slash (/).
+func parseLinkHeader(header string) []linkResource {
+ resources := []linkResource{}
+
+ if header == "" {
+ return resources
+ }
+
+ for _, link := range strings.Split(header, comma) {
+ l := linkResource{params: make(map[string]string)}
+
+ li, ri := strings.Index(link, "<"), strings.Index(link, ">")
+ if li == -1 || ri == -1 {
+ continue
+ }
+
+ l.uri = strings.TrimSpace(link[li+1 : ri])
+
+ for _, param := range strings.Split(strings.TrimSpace(link[ri+1:]), semicolon) {
+ before, after, isCut := strings.Cut(strings.TrimSpace(param), equal)
+ key := strings.TrimSpace(before)
+ if key == "" {
+ continue
+ }
+ if isCut {
+ l.params[key] = strings.TrimSpace(after)
+ } else {
+ l.params[key] = key
+ }
+ }
+
+ resources = append(resources, l)
+ }
+
+ return resources
+}
+
+const (
+ comma = ","
+ semicolon = ";"
+ equal = "="
+)
diff --git a/modules/caddyhttp/push/link_test.go b/modules/caddyhttp/push/link_test.go
new file mode 100644
index 00000000000..634bcb6dc3e
--- /dev/null
+++ b/modules/caddyhttp/push/link_test.go
@@ -0,0 +1,85 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package push
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestParseLinkHeader(t *testing.T) {
+ testCases := []struct {
+ header string
+ expectedResources []linkResource
+ }{
+ {
+			header:            "</resource>; as=script",
+ expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"as": "script"}}},
+ },
+ {
+			header:            "</resource>",
+ expectedResources: []linkResource{{uri: "/resource", params: map[string]string{}}},
+ },
+ {
+			header:            "</resource>; nopush",
+ expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"nopush": "nopush"}}},
+ },
+ {
+			header:            "</resource>;nopush;rel=next",
+ expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"nopush": "nopush", "rel": "next"}}},
+ },
+ {
+			header: "</resource>;nopush;rel=next,</resource2>;nopush",
+ expectedResources: []linkResource{
+ {uri: "/resource", params: map[string]string{"nopush": "nopush", "rel": "next"}},
+ {uri: "/resource2", params: map[string]string{"nopush": "nopush"}},
+ },
+ },
+ {
+			header: "</resource>,</resource2>",
+ expectedResources: []linkResource{
+ {uri: "/resource", params: map[string]string{}},
+ {uri: "/resource2", params: map[string]string{}},
+ },
+ },
+ {
+ header: "malformed",
+ expectedResources: []linkResource{},
+ },
+ {
+			header:            "   </resource> ;   ",
+ expectedResources: []linkResource{{uri: "/resource", params: map[string]string{}}},
+ },
+ }
+
+ for i, test := range testCases {
+ actualResources := parseLinkHeader(test.header)
+ if !reflect.DeepEqual(actualResources, test.expectedResources) {
+ t.Errorf("Test %d (header: %s) - expected resources %v, got %v",
+ i, test.header, test.expectedResources, actualResources)
+ }
+ }
+}
diff --git a/modules/caddyhttp/replacer.go b/modules/caddyhttp/replacer.go
new file mode 100644
index 00000000000..776aa6294b3
--- /dev/null
+++ b/modules/caddyhttp/replacer.go
@@ -0,0 +1,562 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/netip"
+ "net/textproto"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "go.uber.org/zap"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+// NewTestReplacer creates a replacer for an http.Request
+// for use in tests that are not in this package
+func NewTestReplacer(req *http.Request) *caddy.Replacer {
+ repl := caddy.NewReplacer()
+ ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
+ *req = *req.WithContext(ctx)
+ addHTTPVarsToReplacer(repl, req, nil)
+ return repl
+}
+
// addHTTPVarsToReplacer registers an evaluation function on repl that
// resolves http.* placeholders against the given request and response
// writer. Values are computed lazily, only when a placeholder is
// actually resolved.
func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.ResponseWriter) {
	SetVar(req.Context(), "start_time", time.Now())
	SetVar(req.Context(), "uuid", new(requestID))

	httpVars := func(key string) (any, bool) {
		if req != nil {
			// query string parameters
			if strings.HasPrefix(key, reqURIQueryReplPrefix) {
				vals := req.URL.Query()[key[len(reqURIQueryReplPrefix):]]
				// always return true, since the query param might
				// be present only in some requests
				return strings.Join(vals, ","), true
			}

			// request header fields
			if strings.HasPrefix(key, reqHeaderReplPrefix) {
				field := key[len(reqHeaderReplPrefix):]
				vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)]
				// always return true, since the header field might
				// be present only in some requests
				return strings.Join(vals, ","), true
			}

			// cookies
			if strings.HasPrefix(key, reqCookieReplPrefix) {
				name := key[len(reqCookieReplPrefix):]
				for _, cookie := range req.Cookies() {
					if strings.EqualFold(name, cookie.Name) {
						// always return true, since the cookie might
						// be present only in some requests
						return cookie.Value, true
					}
				}
			}

			// http.request.tls.*
			if strings.HasPrefix(key, reqTLSReplPrefix) {
				return getReqTLSReplacement(req, key)
			}

			switch key {
			case "http.request.method":
				return req.Method, true
			case "http.request.scheme":
				if req.TLS != nil {
					return "https", true
				}
				return "http", true
			case "http.request.proto":
				return req.Proto, true
			case "http.request.host":
				host, _, err := net.SplitHostPort(req.Host)
				if err != nil {
					return req.Host, true // OK; there probably was no port
				}
				return host, true
			case "http.request.port":
				_, port, _ := net.SplitHostPort(req.Host)
				if portNum, err := strconv.Atoi(port); err == nil {
					return portNum, true
				}
				return port, true
			case "http.request.hostport":
				return req.Host, true
			case "http.request.local":
				// NOTE(review): assumes the server always sets LocalAddrContextKey;
				// a missing value would leave localAddr nil and panic here — confirm.
				localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
				return localAddr.String(), true
			case "http.request.local.host":
				localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
				host, _, err := net.SplitHostPort(localAddr.String())
				if err != nil {
					// localAddr is host:port for tcp and udp sockets and /unix/socket.path
					// for unix sockets. net.SplitHostPort only operates on tcp and udp sockets,
					// not unix sockets and will fail with the latter.
					// We assume when net.SplitHostPort fails, localAddr is a unix socket and thus
					// already "split" and safe to return.
					return localAddr, true
				}
				return host, true
			case "http.request.local.port":
				localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
				_, port, _ := net.SplitHostPort(localAddr.String())
				if portNum, err := strconv.Atoi(port); err == nil {
					return portNum, true
				}
				return port, true
			case "http.request.remote":
				if req.TLS != nil && !req.TLS.HandshakeComplete {
					// without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed
					return nil, true
				}
				return req.RemoteAddr, true
			case "http.request.remote.host":
				if req.TLS != nil && !req.TLS.HandshakeComplete {
					// without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed
					return nil, true
				}
				host, _, err := net.SplitHostPort(req.RemoteAddr)
				if err != nil {
					// req.RemoteAddr is host:port for tcp and udp sockets and /unix/socket.path
					// for unix sockets. net.SplitHostPort only operates on tcp and udp sockets,
					// not unix sockets and will fail with the latter.
					// We assume when net.SplitHostPort fails, req.RemoteAddr is a unix socket
					// and thus already "split" and safe to return.
					return req.RemoteAddr, true
				}
				return host, true
			case "http.request.remote.port":
				_, port, _ := net.SplitHostPort(req.RemoteAddr)
				if portNum, err := strconv.Atoi(port); err == nil {
					return portNum, true
				}
				return port, true

			// current URI, including any internal rewrites
			case "http.request.uri":
				return req.URL.RequestURI(), true
			case "http.request.uri.path":
				return req.URL.Path, true
			case "http.request.uri.path.file":
				_, file := path.Split(req.URL.Path)
				return file, true
			case "http.request.uri.path.dir":
				dir, _ := path.Split(req.URL.Path)
				return dir, true
			case "http.request.uri.path.file.base":
				// file name without its (last) extension, e.g. "bar.tar" for "bar.tar.gz"
				return strings.TrimSuffix(path.Base(req.URL.Path), path.Ext(req.URL.Path)), true
			case "http.request.uri.path.file.ext":
				return path.Ext(req.URL.Path), true
			case "http.request.uri.query":
				return req.URL.RawQuery, true
			case "http.request.uri.prefixed_query":
				// like .query, but includes the leading "?" when non-empty
				if req.URL.RawQuery == "" {
					return "", true
				}
				return "?" + req.URL.RawQuery, true
			case "http.request.duration":
				start := GetVar(req.Context(), "start_time").(time.Time)
				return time.Since(start), true
			case "http.request.duration_ms":
				start := GetVar(req.Context(), "start_time").(time.Time)
				return time.Since(start).Seconds() * 1e3, true // multiply seconds to preserve decimal (see #4666)

			case "http.request.uuid":
				// fetch the UUID for this request
				id := GetVar(req.Context(), "uuid").(*requestID)

				// set it to this request's access log
				extra := req.Context().Value(ExtraLogFieldsCtxKey).(*ExtraLogFields)
				extra.Set(zap.String("uuid", id.String()))

				return id.String(), true

			case "http.request.body":
				if req.Body == nil {
					return "", true
				}
				// normally net/http will close the body for us, but since we
				// are replacing it with a fake one, we have to ensure we close
				// the real body ourselves when we're done
				defer req.Body.Close()
				// read the request body into a buffer (can't pool because we
				// don't know its lifetime and would have to make a copy anyway)
				buf := new(bytes.Buffer)
				_, _ = io.Copy(buf, req.Body) // can't handle error, so just ignore it
				req.Body = io.NopCloser(buf)  // replace real body with buffered data
				return buf.String(), true

			// original request, before any internal changes
			case "http.request.orig_method":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				return or.Method, true
			case "http.request.orig_uri":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				return or.RequestURI, true
			case "http.request.orig_uri.path":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				return or.URL.Path, true
			case "http.request.orig_uri.path.file":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				_, file := path.Split(or.URL.Path)
				return file, true
			case "http.request.orig_uri.path.dir":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				dir, _ := path.Split(or.URL.Path)
				return dir, true
			case "http.request.orig_uri.query":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				return or.URL.RawQuery, true
			case "http.request.orig_uri.prefixed_query":
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				if or.URL.RawQuery == "" {
					return "", true
				}
				return "?" + or.URL.RawQuery, true
			}

			// remote IP range/prefix (e.g. keep top 24 bits of 1.2.3.4 => "1.2.3.0/24")
			// syntax: "/V4,V6" where V4 = IPv4 bits, and V6 = IPv6 bits; if no comma, then same bit length used for both
			// (EXPERIMENTAL)
			if strings.HasPrefix(key, "http.request.remote.host/") {
				host, _, err := net.SplitHostPort(req.RemoteAddr)
				if err != nil {
					host = req.RemoteAddr // assume no port, I guess?
				}
				addr, err := netip.ParseAddr(host)
				if err != nil {
					return host, true // not an IP address
				}
				// extract the bits from the end of the placeholder (start after "/") then split on ","
				bitsBoth := key[strings.Index(key, "/")+1:]
				ipv4BitsStr, ipv6BitsStr, cutOK := strings.Cut(bitsBoth, ",")
				bitsStr := ipv4BitsStr
				if addr.Is6() && cutOK {
					bitsStr = ipv6BitsStr
				}
				// convert to integer then compute prefix
				bits, err := strconv.Atoi(bitsStr)
				if err != nil {
					return "", true
				}
				prefix, err := addr.Prefix(bits)
				if err != nil {
					return "", true
				}
				return prefix.String(), true
			}

			// hostname labels; labels are indexed from the right,
			// so labels.0 is the TLD (e.g. "com" of "example.com")
			if strings.HasPrefix(key, reqHostLabelsReplPrefix) {
				idxStr := key[len(reqHostLabelsReplPrefix):]
				idx, err := strconv.Atoi(idxStr)
				if err != nil || idx < 0 {
					return "", false
				}
				reqHost, _, err := net.SplitHostPort(req.Host)
				if err != nil {
					reqHost = req.Host // OK; assume there was no port
				}
				hostLabels := strings.Split(reqHost, ".")
				if idx >= len(hostLabels) {
					return "", true
				}
				return hostLabels[len(hostLabels)-idx-1], true
			}

			// path parts; indexed from the left, with the leading
			// empty segment (before the first "/") removed
			if strings.HasPrefix(key, reqURIPathReplPrefix) {
				idxStr := key[len(reqURIPathReplPrefix):]
				idx, err := strconv.Atoi(idxStr)
				if err != nil {
					return "", false
				}
				pathParts := strings.Split(req.URL.Path, "/")
				if len(pathParts) > 0 && pathParts[0] == "" {
					pathParts = pathParts[1:]
				}
				if idx < 0 {
					return "", false
				}
				if idx >= len(pathParts) {
					return "", true
				}
				return pathParts[idx], true
			}

			// orig uri path parts
			if strings.HasPrefix(key, reqOrigURIPathReplPrefix) {
				idxStr := key[len(reqOrigURIPathReplPrefix):]
				idx, err := strconv.Atoi(idxStr)
				if err != nil {
					return "", false
				}
				or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
				pathParts := strings.Split(or.URL.Path, "/")
				if len(pathParts) > 0 && pathParts[0] == "" {
					pathParts = pathParts[1:]
				}
				if idx < 0 {
					return "", false
				}
				if idx >= len(pathParts) {
					return "", true
				}
				return pathParts[idx], true
			}

			// middleware variables
			if strings.HasPrefix(key, varsReplPrefix) {
				varName := key[len(varsReplPrefix):]
				raw := GetVar(req.Context(), varName)
				// variables can be dynamic, so always return true
				// even when it may not be set; treat as empty then
				return raw, true
			}
		}

		if w != nil {
			// response header fields
			if strings.HasPrefix(key, respHeaderReplPrefix) {
				field := key[len(respHeaderReplPrefix):]
				vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)]
				// always return true, since the header field might
				// be present only in some responses
				return strings.Join(vals, ","), true
			}
		}

		switch {
		case key == "http.shutting_down":
			server := req.Context().Value(ServerCtxKey).(*Server)
			server.shutdownAtMu.RLock()
			defer server.shutdownAtMu.RUnlock()
			return !server.shutdownAt.IsZero(), true
		case key == "http.time_until_shutdown":
			server := req.Context().Value(ServerCtxKey).(*Server)
			server.shutdownAtMu.RLock()
			defer server.shutdownAtMu.RUnlock()
			if server.shutdownAt.IsZero() {
				return nil, true
			}
			return time.Until(server.shutdownAt), true
		}

		return nil, false
	}

	repl.Map(httpVars)
}
+
// getReqTLSReplacement resolves http.request.tls.* placeholders for req.
// It returns (nil, false) when there is no TLS connection state or the
// requested field is not recognized.
func getReqTLSReplacement(req *http.Request, key string) (any, bool) {
	if req == nil || req.TLS == nil {
		return nil, false
	}

	if len(key) < len(reqTLSReplPrefix) {
		return nil, false
	}

	// strip the "http.request.tls." prefix; fields match case-insensitively
	field := strings.ToLower(key[len(reqTLSReplPrefix):])

	// client.* fields are derived from the peer (client) certificate
	if strings.HasPrefix(field, "client.") {
		cert := getTLSPeerCert(req.TLS)
		if cert == nil {
			return nil, false
		}

		// subject alternate names (SANs)
		if strings.HasPrefix(field, "client.san.") {
			field = field[len("client.san."):]
			var fieldName string
			var fieldValue any
			switch {
			case strings.HasPrefix(field, "dns_names"):
				fieldName = "dns_names"
				fieldValue = cert.DNSNames
			case strings.HasPrefix(field, "emails"):
				fieldName = "emails"
				fieldValue = cert.EmailAddresses
			case strings.HasPrefix(field, "ips"):
				fieldName = "ips"
				fieldValue = cert.IPAddresses
			case strings.HasPrefix(field, "uris"):
				fieldName = "uris"
				fieldValue = cert.URIs
			default:
				return nil, false
			}
			field = field[len(fieldName):]

			// if no index was specified, return the whole list
			if field == "" {
				return fieldValue, true
			}
			if len(field) < 2 || field[0] != '.' {
				return nil, false
			}
			field = field[1:] // trim '.' between field name and index

			// get the numeric index
			idx, err := strconv.Atoi(field)
			if err != nil || idx < 0 {
				return nil, false
			}

			// access the indexed element and return it;
			// an out-of-range index resolves to an empty value, not an error
			switch v := fieldValue.(type) {
			case []string:
				if idx >= len(v) {
					return nil, true
				}
				return v[idx], true
			case []net.IP:
				if idx >= len(v) {
					return nil, true
				}
				return v[idx], true
			case []*url.URL:
				if idx >= len(v) {
					return nil, true
				}
				return v[idx], true
			}
		}

		switch field {
		case "client.fingerprint":
			// SHA-256 digest of the raw (DER) certificate, hex-encoded
			return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)), true
		case "client.public_key", "client.public_key_sha256":
			if cert.PublicKey == nil {
				return nil, true
			}
			pubKeyBytes, err := marshalPublicKey(cert.PublicKey)
			if err != nil {
				return nil, true
			}
			if strings.HasSuffix(field, "_sha256") {
				return fmt.Sprintf("%x", sha256.Sum256(pubKeyBytes)), true
			}
			return fmt.Sprintf("%x", pubKeyBytes), true
		case "client.issuer":
			return cert.Issuer, true
		case "client.serial":
			return cert.SerialNumber, true
		case "client.subject":
			return cert.Subject, true
		case "client.certificate_pem":
			block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
			return pem.EncodeToMemory(&block), true
		case "client.certificate_der_base64":
			return base64.StdEncoding.EncodeToString(cert.Raw), true
		default:
			return nil, false
		}
	}

	// connection-level TLS fields
	switch field {
	case "version":
		return caddytls.ProtocolName(req.TLS.Version), true
	case "cipher_suite":
		return tls.CipherSuiteName(req.TLS.CipherSuite), true
	case "resumed":
		return req.TLS.DidResume, true
	case "proto":
		return req.TLS.NegotiatedProtocol, true
	case "proto_mutual":
		// req.TLS.NegotiatedProtocolIsMutual is deprecated - it's always true.
		return true, true
	case "server_name":
		return req.TLS.ServerName, true
	}
	return nil, false
}
+
+// marshalPublicKey returns the byte encoding of pubKey.
+func marshalPublicKey(pubKey any) ([]byte, error) {
+ switch key := pubKey.(type) {
+ case *rsa.PublicKey:
+ return asn1.Marshal(key)
+ case *ecdsa.PublicKey:
+ e, err := key.ECDH()
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+ case ed25519.PublicKey:
+ return key, nil
+ }
+ return nil, fmt.Errorf("unrecognized public key type: %T", pubKey)
+}
+
+// getTLSPeerCert retrieves the first peer certificate from a TLS session.
+// Returns nil if no peer cert is in use.
+func getTLSPeerCert(cs *tls.ConnectionState) *x509.Certificate {
+ if len(cs.PeerCertificates) == 0 {
+ return nil
+ }
+ return cs.PeerCertificates[0]
+}
+
+type requestID struct {
+ value string
+}
+
+// Lazy generates UUID string or return cached value if present
+func (rid *requestID) String() string {
+ if rid.value == "" {
+ if id, err := uuid.NewRandom(); err == nil {
+ rid.value = id.String()
+ }
+ }
+ return rid.value
+}
+
// Prefixes for placeholder keys that carry a variable suffix,
// e.g. "http.request.header.Content-Type" or "http.vars.my_var".
const (
	reqCookieReplPrefix      = "http.request.cookie."
	reqHeaderReplPrefix      = "http.request.header."
	reqHostLabelsReplPrefix  = "http.request.host.labels."
	reqTLSReplPrefix         = "http.request.tls."
	reqURIPathReplPrefix     = "http.request.uri.path."
	reqURIQueryReplPrefix    = "http.request.uri.query."
	respHeaderReplPrefix     = "http.response.header."
	varsReplPrefix           = "http.vars."
	reqOrigURIPathReplPrefix = "http.request.orig_uri.path."
)
diff --git a/modules/caddyhttp/replacer_test.go b/modules/caddyhttp/replacer_test.go
new file mode 100644
index 00000000000..50a2e8c62cb
--- /dev/null
+++ b/modules/caddyhttp/replacer_test.go
@@ -0,0 +1,232 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
// TestHTTPVarReplacement exercises the http.* placeholder evaluator across
// request, local/remote address, URI, and TLS-derived values.
func TestHTTPVarReplacement(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, "/foo/bar.tar.gz", nil)
	repl := caddy.NewReplacer()
	localAddr, _ := net.ResolveTCPAddr("tcp", "192.168.159.1:80")
	ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
	ctx = context.WithValue(ctx, http.LocalAddrContextKey, localAddr)
	req = req.WithContext(ctx)
	req.Host = "example.com:80"
	req.RemoteAddr = "192.168.159.32:1234"

	// client certificate used to exercise the
	// http.request.tls.client.* placeholders below
	clientCert := []byte(`-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1DYWRk
eSBUZXN0IENBMB4XDTE4MDcyNDIxMzUwNVoXDTI4MDcyMTIxMzUwNVowHTEbMBkG
A1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
iQKBgQDFDEpzF0ew68teT3xDzcUxVFaTII+jXH1ftHXxxP4BEYBU4q90qzeKFneF
z83I0nC0WAQ45ZwHfhLMYHFzHPdxr6+jkvKPASf0J2v2HDJuTM1bHBbik5Ls5eq+
fVZDP8o/VHKSBKxNs8Goc2NTsr5b07QTIpkRStQK+RJALk4x9QIDAQABo0swSTAJ
BgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEANSjz2Sk+
eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV
3Q9fgDkiUod+uIK0IynzIKvw+Cjg+3nx6NQ0IM0zo8c7v398RzB4apbXKZyeeqUH
9fNwfEi+OoXR6s+upSKobCmLGLGi9Na5s5g=
-----END CERTIFICATE-----`)

	block, _ := pem.Decode(clientCert)
	if block == nil {
		t.Fatalf("failed to decode PEM certificate")
	}

	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		t.Fatalf("failed to decode PEM certificate: %v", err)
	}

	// NegotiatedProtocolIsMutual is deprecated (always true in the
	// std lib) but set here for completeness of the fixture.
	req.TLS = &tls.ConnectionState{
		Version:                    tls.VersionTLS13,
		HandshakeComplete:          true,
		ServerName:                 "example.com",
		CipherSuite:                tls.TLS_AES_256_GCM_SHA384,
		PeerCertificates:           []*x509.Certificate{cert},
		NegotiatedProtocol:         "h2",
		NegotiatedProtocolIsMutual: true,
	}

	res := httptest.NewRecorder()
	addHTTPVarsToReplacer(repl, req, res)

	for i, tc := range []struct {
		get    string
		expect string
	}{
		{
			get:    "http.request.scheme",
			expect: "https",
		},
		{
			get:    "http.request.method",
			expect: http.MethodGet,
		},
		{
			get:    "http.request.host",
			expect: "example.com",
		},
		{
			get:    "http.request.port",
			expect: "80",
		},
		{
			get:    "http.request.hostport",
			expect: "example.com:80",
		},
		{
			get:    "http.request.local.host",
			expect: "192.168.159.1",
		},
		{
			get:    "http.request.local.port",
			expect: "80",
		},
		{
			get:    "http.request.local",
			expect: "192.168.159.1:80",
		},
		{
			get:    "http.request.remote.host",
			expect: "192.168.159.32",
		},
		{
			// keep only the top 24 bits of the remote IPv4 address
			get:    "http.request.remote.host/24",
			expect: "192.168.159.0/24",
		},
		{
			// "/V4,V6" syntax: the IPv4 bit length applies here
			get:    "http.request.remote.host/24,32",
			expect: "192.168.159.0/24",
		},
		{
			// invalid bit length resolves to empty, not an error
			get:    "http.request.remote.host/999",
			expect: "",
		},
		{
			get:    "http.request.remote.port",
			expect: "1234",
		},
		{
			// host labels are indexed from the right (TLD first)
			get:    "http.request.host.labels.0",
			expect: "com",
		},
		{
			get:    "http.request.host.labels.1",
			expect: "example",
		},
		{
			get:    "http.request.host.labels.2",
			expect: "",
		},
		{
			get:    "http.request.uri.path.file",
			expect: "bar.tar.gz",
		},
		{
			get:    "http.request.uri.path.file.base",
			expect: "bar.tar",
		},
		{
			// not ideal, but also most correct, given that files can have dots (example: index..html) TODO: maybe this isn't right..
			get:    "http.request.uri.path.file.ext",
			expect: ".gz",
		},
		{
			get:    "http.request.tls.cipher_suite",
			expect: "TLS_AES_256_GCM_SHA384",
		},
		{
			get:    "http.request.tls.proto",
			expect: "h2",
		},
		{
			get:    "http.request.tls.proto_mutual",
			expect: "true",
		},
		{
			get:    "http.request.tls.resumed",
			expect: "false",
		},
		{
			get:    "http.request.tls.server_name",
			expect: "example.com",
		},
		{
			get:    "http.request.tls.version",
			expect: "tls1.3",
		},
		{
			get:    "http.request.tls.client.fingerprint",
			expect: "9f57b7b497cceacc5459b76ac1c3afedbc12b300e728071f55f84168ff0f7702",
		},
		{
			get:    "http.request.tls.client.issuer",
			expect: "CN=Caddy Test CA",
		},
		{
			get:    "http.request.tls.client.serial",
			expect: "2",
		},
		{
			get:    "http.request.tls.client.subject",
			expect: "CN=client.localdomain",
		},
		{
			get:    "http.request.tls.client.san.dns_names",
			expect: "[localhost]",
		},
		{
			get:    "http.request.tls.client.san.dns_names.0",
			expect: "localhost",
		},
		{
			get:    "http.request.tls.client.san.dns_names.1",
			expect: "",
		},
		{
			get:    "http.request.tls.client.san.ips",
			expect: "[127.0.0.1]",
		},
		{
			get:    "http.request.tls.client.san.ips.0",
			expect: "127.0.0.1",
		},
		{
			get:    "http.request.tls.client.certificate_pem",
			expect: string(clientCert) + "\n", // returned value comes with a newline appended to it
		},
	} {
		actual, got := repl.GetString(tc.get)
		if !got {
			t.Errorf("Test %d: Expected to recognize the placeholder name, but didn't", i)
		}
		if actual != tc.expect {
			t.Errorf("Test %d: Expected %s to be '%s' but got '%s'",
				i, tc.get, tc.expect, actual)
		}
	}
}
diff --git a/modules/caddyhttp/requestbody/caddyfile.go b/modules/caddyhttp/requestbody/caddyfile.go
new file mode 100644
index 00000000000..8378ad7f471
--- /dev/null
+++ b/modules/caddyhttp/requestbody/caddyfile.go
@@ -0,0 +1,77 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package requestbody
+
+import (
+ "time"
+
+ "github.com/dustin/go-humanize"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
// init registers the Caddyfile directive for the request_body handler.
func init() {
	httpcaddyfile.RegisterHandlerDirective("request_body", parseCaddyfile)
}
+
+func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+ h.Next() // consume directive name
+
+ rb := new(RequestBody)
+
+ // configuration should be in a block
+ for h.NextBlock(0) {
+ switch h.Val() {
+ case "max_size":
+ var sizeStr string
+ if !h.AllArgs(&sizeStr) {
+ return nil, h.ArgErr()
+ }
+ size, err := humanize.ParseBytes(sizeStr)
+ if err != nil {
+ return nil, h.Errf("parsing max_size: %v", err)
+ }
+ rb.MaxSize = int64(size)
+
+ case "read_timeout":
+ var timeoutStr string
+ if !h.AllArgs(&timeoutStr) {
+ return nil, h.ArgErr()
+ }
+ timeout, err := time.ParseDuration(timeoutStr)
+ if err != nil {
+ return nil, h.Errf("parsing read_timeout: %v", err)
+ }
+ rb.ReadTimeout = timeout
+
+ case "write_timeout":
+ var timeoutStr string
+ if !h.AllArgs(&timeoutStr) {
+ return nil, h.ArgErr()
+ }
+ timeout, err := time.ParseDuration(timeoutStr)
+ if err != nil {
+ return nil, h.Errf("parsing write_timeout: %v", err)
+ }
+ rb.WriteTimeout = timeout
+
+ default:
+ return nil, h.Errf("unrecognized request_body subdirective '%s'", h.Val())
+ }
+ }
+
+ return rb, nil
+}
diff --git a/modules/caddyhttp/requestbody/requestbody.go b/modules/caddyhttp/requestbody/requestbody.go
new file mode 100644
index 00000000000..830050416e9
--- /dev/null
+++ b/modules/caddyhttp/requestbody/requestbody.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package requestbody
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
// init registers the RequestBody module with Caddy at package load time.
func init() {
	caddy.RegisterModule(RequestBody{})
}
+
// RequestBody is a middleware for manipulating the request body.
type RequestBody struct {
	// The maximum number of bytes to allow reading from the body by a later handler.
	// If more bytes are read, an error with HTTP status 413 is returned.
	MaxSize int64 `json:"max_size,omitempty"`

	// If set, a read deadline of now+ReadTimeout is applied to the
	// connection when this handler runs.
	// EXPERIMENTAL. Subject to change/removal.
	ReadTimeout time.Duration `json:"read_timeout,omitempty"`

	// If set, a write deadline of now+WriteTimeout is applied to the
	// connection when this handler runs.
	// EXPERIMENTAL. Subject to change/removal.
	WriteTimeout time.Duration `json:"write_timeout,omitempty"`

	// logger is assigned during Provision.
	logger *zap.Logger
}
+
+// CaddyModule returns the Caddy module information.
+func (RequestBody) CaddyModule() caddy.ModuleInfo {
+ return caddy.ModuleInfo{
+ ID: "http.handlers.request_body",
+ New: func() caddy.Module { return new(RequestBody) },
+ }
+}
+
// Provision sets up the module by initializing its logger.
func (rb *RequestBody) Provision(ctx caddy.Context) error {
	rb.logger = ctx.Logger()
	return nil
}
+
// ServeHTTP caps the request body size (yielding HTTP 413 when exceeded)
// and applies optional read/write deadlines, then calls the next handler.
// Deadline-setting failures are logged but do not abort the request.
func (rb RequestBody) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	if r.Body == nil {
		return next.ServeHTTP(w, r)
	}
	if rb.MaxSize > 0 {
		// wrap the body so over-limit reads surface as a 413 error
		r.Body = errorWrapper{http.MaxBytesReader(w, r.Body, rb.MaxSize)}
	}
	if rb.ReadTimeout > 0 || rb.WriteTimeout > 0 {
		// the response controller is used only to set deadlines here,
		// so there is no response body to close
		//nolint:bodyclose
		rc := http.NewResponseController(w)
		if rb.ReadTimeout > 0 {
			if err := rc.SetReadDeadline(time.Now().Add(rb.ReadTimeout)); err != nil {
				if c := rb.logger.Check(zapcore.ErrorLevel, "could not set read deadline"); c != nil {
					c.Write(zap.Error(err))
				}
			}
		}
		if rb.WriteTimeout > 0 {
			if err := rc.SetWriteDeadline(time.Now().Add(rb.WriteTimeout)); err != nil {
				if c := rb.logger.Check(zapcore.ErrorLevel, "could not set write deadline"); c != nil {
					c.Write(zap.Error(err))
				}
			}
		}
	}
	return next.ServeHTTP(w, r)
}
+
+// errorWrapper wraps errors that are returned from Read()
+// so that they can be associated with a proper status code.
+type errorWrapper struct {
+ io.ReadCloser
+}
+
+func (ew errorWrapper) Read(p []byte) (n int, err error) {
+ n, err = ew.ReadCloser.Read(p)
+ var mbe *http.MaxBytesError
+ if errors.As(err, &mbe) {
+ err = caddyhttp.Error(http.StatusRequestEntityTooLarge, err)
+ }
+ return
+}
+
+// Interface guard
+var _ caddyhttp.MiddlewareHandler = (*RequestBody)(nil)
diff --git a/modules/caddyhttp/responsematchers.go b/modules/caddyhttp/responsematchers.go
new file mode 100644
index 00000000000..a6b34c76dbf
--- /dev/null
+++ b/modules/caddyhttp/responsematchers.go
@@ -0,0 +1,119 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+)
+
// ResponseMatcher is a type which can determine if an
// HTTP response matches some criteria.
type ResponseMatcher struct {
	// If set, the response status code must match one of these.
	// A one-digit status can be used to represent all codes
	// in that class (e.g. 3 for all 3xx codes).
	StatusCode []int `json:"status_code,omitempty"`

	// If set, each header specified must be one of the
	// specified values, with the same logic used by the
	// [request header matcher](/docs/json/apps/http/servers/routes/match/header/).
	Headers http.Header `json:"headers,omitempty"`
}
+
+// Match returns true if the given statusCode and hdr match rm.
+func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool {
+ if !rm.matchStatusCode(statusCode) {
+ return false
+ }
+ return matchHeaders(hdr, rm.Headers, "", []string{}, nil)
+}
+
+func (rm ResponseMatcher) matchStatusCode(statusCode int) bool {
+ if rm.StatusCode == nil {
+ return true
+ }
+ for _, code := range rm.StatusCode {
+ if StatusCodeMatches(statusCode, code) {
+ return true
+ }
+ }
+ return false
+}
+
// ParseNamedResponseMatcher parses the tokens of a named response matcher.
//
//	@name {
//		header <field> [<value>]
//		status <code...>
//	}
//
// Or, single line syntax:
//
//	@name [header <field> [<value>]] | [status <code...>]
func ParseNamedResponseMatcher(d *caddyfile.Dispenser, matchers map[string]ResponseMatcher) error {
	d.Next() // consume matcher name
	definitionName := d.Val()

	if _, ok := matchers[definitionName]; ok {
		return d.Errf("matcher is defined more than once: %s", definitionName)
	}

	matcher := ResponseMatcher{}
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		switch d.Val() {
		case "header":
			if matcher.Headers == nil {
				matcher.Headers = http.Header{}
			}

			// reuse the header request matcher's unmarshaler
			headerMatcher := MatchHeader(matcher.Headers)
			err := headerMatcher.UnmarshalCaddyfile(d.NewFromNextSegment())
			if err != nil {
				return err
			}

			matcher.Headers = http.Header(headerMatcher)
		case "status":
			if matcher.StatusCode == nil {
				matcher.StatusCode = []int{}
			}

			args := d.RemainingArgs()
			if len(args) == 0 {
				return d.ArgErr()
			}

			for _, arg := range args {
				// allow a status class spelled like "3xx";
				// keep only the class digit
				if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
					arg = arg[:1]
				}
				statusNum, err := strconv.Atoi(arg)
				if err != nil {
					return d.Errf("bad status value '%s': %v", arg, err)
				}
				matcher.StatusCode = append(matcher.StatusCode, statusNum)
			}
		default:
			return d.Errf("unrecognized response matcher %s", d.Val())
		}
	}
	matchers[definitionName] = matcher
	return nil
}
diff --git a/modules/caddyhttp/responsematchers_test.go b/modules/caddyhttp/responsematchers_test.go
new file mode 100644
index 00000000000..f5bb6f18fbc
--- /dev/null
+++ b/modules/caddyhttp/responsematchers_test.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "net/http"
+ "testing"
+)
+
+func TestResponseMatcher(t *testing.T) {
+ for i, tc := range []struct {
+ require ResponseMatcher
+ status int
+ hdr http.Header // make sure these are canonical cased (std lib will do that in a real request)
+ expect bool
+ }{
+ {
+ require: ResponseMatcher{},
+ status: 200,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{200},
+ },
+ status: 200,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{2},
+ },
+ status: 200,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{201},
+ },
+ status: 200,
+ expect: false,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{2},
+ },
+ status: 301,
+ expect: false,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{3},
+ },
+ status: 301,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{3},
+ },
+ status: 399,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{3},
+ },
+ status: 400,
+ expect: false,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{3, 4},
+ },
+ status: 400,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ StatusCode: []int{3, 401},
+ },
+ status: 401,
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"bar"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"bar"}},
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo2": []string{"bar"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"bar"}},
+ expect: false,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"bar", "baz"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"baz"}},
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"bar"},
+ "Foo2": []string{"baz"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"baz"}},
+ expect: false,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"bar"},
+ "Foo2": []string{"baz"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"bar"}, "Foo2": []string{"baz"}},
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"foo*"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"foobar"}},
+ expect: true,
+ },
+ {
+ require: ResponseMatcher{
+ Headers: http.Header{
+ "Foo": []string{"foo*"},
+ },
+ },
+ hdr: http.Header{"Foo": []string{"foobar"}},
+ expect: true,
+ },
+ } {
+ actual := tc.require.Match(tc.status, tc.hdr)
+ if actual != tc.expect {
+ t.Errorf("Test %d %v: Expected %t, got %t for HTTP %d %v", i, tc.require, tc.expect, actual, tc.status, tc.hdr)
+ continue
+ }
+ }
+}
diff --git a/modules/caddyhttp/responsewriter.go b/modules/caddyhttp/responsewriter.go
new file mode 100644
index 00000000000..904c30c0352
--- /dev/null
+++ b/modules/caddyhttp/responsewriter.go
@@ -0,0 +1,344 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyhttp
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+)
+
+// ResponseWriterWrapper wraps an underlying ResponseWriter and
+// promotes its Pusher method as well. To use this type, embed
+// a pointer to it within your own struct type that implements
+// the http.ResponseWriter interface, then call methods on the
+// embedded value.
+type ResponseWriterWrapper struct {
+ http.ResponseWriter
+}
+
+// Push implements http.Pusher. It simply calls the underlying
+// ResponseWriter's Push method if there is one, or returns
+// ErrNotImplemented otherwise.
+func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error {
+ if pusher, ok := rww.ResponseWriter.(http.Pusher); ok {
+ return pusher.Push(target, opts)
+ }
+ return ErrNotImplemented
+}
+
+// ReadFrom implements io.ReaderFrom. It tries to use the underlying
+// ResponseWriter's io.ReaderFrom if available, falling back to io.Copy.
+// see: https://github.com/caddyserver/caddy/issues/6546
+func (rww *ResponseWriterWrapper) ReadFrom(r io.Reader) (n int64, err error) {
+ if rf, ok := rww.ResponseWriter.(io.ReaderFrom); ok {
+ return rf.ReadFrom(r)
+ }
+ return io.Copy(rww.ResponseWriter, r)
+}
+
+// Unwrap returns the underlying ResponseWriter, necessary for
+// http.ResponseController to work correctly.
+func (rww *ResponseWriterWrapper) Unwrap() http.ResponseWriter {
+ return rww.ResponseWriter
+}
+
+// ErrNotImplemented is returned when an underlying
+// ResponseWriter does not implement the required method.
+var ErrNotImplemented = fmt.Errorf("method not implemented")
+
+type responseRecorder struct {
+ *ResponseWriterWrapper
+ statusCode int
+ buf *bytes.Buffer
+ shouldBuffer ShouldBufferFunc
+ size int
+ wroteHeader bool
+ stream bool
+
+ readSize *int
+}
+
+// NewResponseRecorder returns a new ResponseRecorder that can be
+// used instead of a standard http.ResponseWriter. The recorder is
+// useful for middlewares which need to buffer a response and
+// potentially process its entire body before actually writing the
+// response to the underlying writer. Of course, buffering the entire
+// body has a memory overhead, but sometimes there is no way to avoid
+// buffering the whole response, hence the existence of this type.
+// Still, if at all practical, handlers should strive to stream
+// responses by wrapping Write and WriteHeader methods instead of
+// buffering whole response bodies.
+//
+// Buffering is actually optional. The shouldBuffer function will
+// be called just before the headers are written. If it returns
+// true, the headers and body will be buffered by this recorder
+// and not written to the underlying writer; if false, the headers
+// will be written immediately and the body will be streamed out
+// directly to the underlying writer. If shouldBuffer is nil,
+// the response will never be buffered and will always be streamed
+// directly to the writer.
+//
+// You can know if shouldBuffer returned true by calling Buffered().
+//
+// The provided buffer buf should be obtained from a pool for best
+// performance (see the sync.Pool type).
+//
+// Proper usage of a recorder looks like this:
+//
+// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer)
+// err := next.ServeHTTP(rec, req)
+// if err != nil {
+// return err
+// }
+// if !rec.Buffered() {
+// return nil
+// }
+// // process the buffered response here
+//
+// The header map is not buffered; i.e. the ResponseRecorder's Header()
+// method returns the same header map of the underlying ResponseWriter.
+// This is a crucial design decision to allow HTTP trailers to be
+// flushed properly (https://github.com/caddyserver/caddy/issues/3236).
+//
+// Once you are ready to write the response, there are two ways you can
+// do it. The easier way is to have the recorder do it:
+//
+// rec.WriteResponse()
+//
+// This writes the recorded response headers as well as the buffered body.
+// Or, you may wish to do it yourself, especially if you manipulated the
+// buffered body. First you will need to write the headers with the
+// recorded status code, then write the body (this example writes the
+// recorder's body buffer, but you might have your own body to write
+// instead):
+//
+// w.WriteHeader(rec.Status())
+// io.Copy(w, rec.Buffer())
+//
+// As a special case, 1xx responses are not buffered nor recorded
+// because they are not the final response; they are passed through
+// directly to the underlying ResponseWriter.
+func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder {
+ return &responseRecorder{
+ ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
+ buf: buf,
+ shouldBuffer: shouldBuffer,
+ }
+}
+
+// WriteHeader writes the headers with statusCode to the wrapped
+// ResponseWriter unless the response is to be buffered instead.
+// 1xx responses are never buffered.
+func (rr *responseRecorder) WriteHeader(statusCode int) {
+ if rr.wroteHeader {
+ return
+ }
+
+ // save statusCode always, in case HTTP middleware upgrades websocket
+ // connections by manually setting headers and writing status 101
+ rr.statusCode = statusCode
+
+ // decide whether we should buffer the response
+ if rr.shouldBuffer == nil {
+ rr.stream = true
+ } else {
+ rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())
+ }
+
+ // 1xx responses aren't final; just informational
+ if statusCode < 100 || statusCode > 199 {
+ rr.wroteHeader = true
+ }
+
+ // if informational or not buffered, immediately write header
+ if rr.stream || (100 <= statusCode && statusCode <= 199) {
+ rr.ResponseWriterWrapper.WriteHeader(statusCode)
+ }
+}
+
+func (rr *responseRecorder) Write(data []byte) (int, error) {
+ rr.WriteHeader(http.StatusOK)
+ var n int
+ var err error
+ if rr.stream {
+ n, err = rr.ResponseWriterWrapper.Write(data)
+ } else {
+ n, err = rr.buf.Write(data)
+ }
+
+ rr.size += n
+ return n, err
+}
+
+func (rr *responseRecorder) ReadFrom(r io.Reader) (int64, error) {
+ rr.WriteHeader(http.StatusOK)
+ var n int64
+ var err error
+ if rr.stream {
+ n, err = rr.ResponseWriterWrapper.ReadFrom(r)
+ } else {
+ n, err = rr.buf.ReadFrom(r)
+ }
+
+ rr.size += int(n)
+ return n, err
+}
+
+// Status returns the status code that was written, if any.
+func (rr *responseRecorder) Status() int {
+ return rr.statusCode
+}
+
+// Size returns the number of bytes written,
+// not including the response headers.
+func (rr *responseRecorder) Size() int {
+ return rr.size
+}
+
+// Buffer returns the body buffer that rr was created with.
+// You should still have your original pointer, though.
+func (rr *responseRecorder) Buffer() *bytes.Buffer {
+ return rr.buf
+}
+
+// Buffered returns whether rr has decided to buffer the response.
+func (rr *responseRecorder) Buffered() bool {
+ return !rr.stream
+}
+
+func (rr *responseRecorder) WriteResponse() error {
+ if rr.statusCode == 0 {
+ // could happen if no handlers actually wrote anything,
+ // and this prevents a panic; status must be > 0
+ rr.WriteHeader(http.StatusOK)
+ }
+ if rr.stream {
+ return nil
+ }
+ rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
+ _, err := io.Copy(rr.ResponseWriterWrapper, rr.buf)
+ return err
+}
+
+// FlushError will suppress actual flushing if the response is buffered. See:
+// https://github.com/caddyserver/caddy/issues/6144
+func (rr *responseRecorder) FlushError() error {
+ if rr.stream {
+ //nolint:bodyclose
+ return http.NewResponseController(rr.ResponseWriterWrapper).Flush()
+ }
+ return nil
+}
+
+// setReadSize is unexported so it can only be used in this package.
+// TODO: maybe export it later
+func (rr *responseRecorder) setReadSize(size *int) {
+ rr.readSize = size
+}
+
+func (rr *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ //nolint:bodyclose
+ conn, brw, err := http.NewResponseController(rr.ResponseWriterWrapper).Hijack()
+ if err != nil {
+ return nil, nil, err
+ }
+	// Per the net/http documentation, the returned bufio.Writer is empty, but the bufio.Reader may still hold buffered data
+ conn = &hijackedConn{conn, rr}
+ brw.Writer.Reset(conn)
+
+ buffered := brw.Reader.Buffered()
+ if buffered != 0 {
+ conn.(*hijackedConn).updateReadSize(buffered)
+ data, _ := brw.Peek(buffered)
+ brw.Reader.Reset(io.MultiReader(bytes.NewReader(data), conn))
+ // peek to make buffered data appear, as Reset will make it 0
+ _, _ = brw.Peek(buffered)
+ } else {
+ brw.Reader.Reset(conn)
+ }
+ return conn, brw, nil
+}
+
+// used to track the size of hijacked response writers
+type hijackedConn struct {
+ net.Conn
+ rr *responseRecorder
+}
+
+func (hc *hijackedConn) updateReadSize(n int) {
+ if hc.rr.readSize != nil {
+ *hc.rr.readSize += n
+ }
+}
+
+func (hc *hijackedConn) Read(p []byte) (int, error) {
+ n, err := hc.Conn.Read(p)
+ hc.updateReadSize(n)
+ return n, err
+}
+
+func (hc *hijackedConn) WriteTo(w io.Writer) (int64, error) {
+ n, err := io.Copy(w, hc.Conn)
+ hc.updateReadSize(int(n))
+ return n, err
+}
+
+func (hc *hijackedConn) Write(p []byte) (int, error) {
+ n, err := hc.Conn.Write(p)
+ hc.rr.size += n
+ return n, err
+}
+
+func (hc *hijackedConn) ReadFrom(r io.Reader) (int64, error) {
+ n, err := io.Copy(hc.Conn, r)
+ hc.rr.size += int(n)
+ return n, err
+}
+
+// ResponseRecorder is a http.ResponseWriter that records
+// responses instead of writing them to the client. See
+// docs for NewResponseRecorder for proper usage.
+type ResponseRecorder interface {
+ http.ResponseWriter
+ Status() int
+ Buffer() *bytes.Buffer
+ Buffered() bool
+ Size() int
+ WriteResponse() error
+}
+
+// ShouldBufferFunc is a function that returns true if the
+// response should be buffered, given the pending HTTP status
+// code and response headers.
+type ShouldBufferFunc func(status int, header http.Header) bool
+
+// Interface guards
+var (
+ _ http.ResponseWriter = (*ResponseWriterWrapper)(nil)
+ _ ResponseRecorder = (*responseRecorder)(nil)
+
+ // Implementing ReaderFrom can be such a significant
+ // optimization that it should probably be required!
+ // see PR #5022 (25%-50% speedup)
+ _ io.ReaderFrom = (*ResponseWriterWrapper)(nil)
+ _ io.ReaderFrom = (*responseRecorder)(nil)
+ _ io.ReaderFrom = (*hijackedConn)(nil)
+
+ _ io.WriterTo = (*hijackedConn)(nil)
+)
diff --git a/modules/caddyhttp/responsewriter_test.go b/modules/caddyhttp/responsewriter_test.go
new file mode 100644
index 00000000000..c08ad26a472
--- /dev/null
+++ b/modules/caddyhttp/responsewriter_test.go
@@ -0,0 +1,171 @@
+package caddyhttp
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "strings"
+ "testing"
+)
+
+type responseWriterSpy interface {
+ http.ResponseWriter
+ Written() string
+ CalledReadFrom() bool
+}
+
+var (
+ _ responseWriterSpy = (*baseRespWriter)(nil)
+ _ responseWriterSpy = (*readFromRespWriter)(nil)
+)
+
+// a barebones http.ResponseWriter mock
+type baseRespWriter []byte
+
+func (brw *baseRespWriter) Write(d []byte) (int, error) {
+ *brw = append(*brw, d...)
+ return len(d), nil
+}
+func (brw *baseRespWriter) Header() http.Header { return nil }
+func (brw *baseRespWriter) WriteHeader(statusCode int) {}
+func (brw *baseRespWriter) Written() string { return string(*brw) }
+func (brw *baseRespWriter) CalledReadFrom() bool { return false }
+
+// an http.ResponseWriter mock that supports ReadFrom
+type readFromRespWriter struct {
+ baseRespWriter
+ called bool
+}
+
+func (rf *readFromRespWriter) ReadFrom(r io.Reader) (int64, error) {
+ rf.called = true
+ return io.Copy(&rf.baseRespWriter, r)
+}
+
+func (rf *readFromRespWriter) CalledReadFrom() bool { return rf.called }
+
+func TestResponseWriterWrapperReadFrom(t *testing.T) {
+ tests := map[string]struct {
+ responseWriter responseWriterSpy
+ wantReadFrom bool
+ }{
+ "no ReadFrom": {
+ responseWriter: &baseRespWriter{},
+ wantReadFrom: false,
+ },
+ "has ReadFrom": {
+ responseWriter: &readFromRespWriter{},
+ wantReadFrom: true,
+ },
+ }
+ for name, tt := range tests {
+ t.Run(name, func(t *testing.T) {
+ // what we expect middlewares to do:
+ type myWrapper struct {
+ *ResponseWriterWrapper
+ }
+
+ wrapped := myWrapper{
+ ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: tt.responseWriter},
+ }
+
+ const srcData = "boo!"
+ // hides everything but Read, since strings.Reader implements WriteTo it would
+ // take precedence over our ReadFrom.
+ src := struct{ io.Reader }{strings.NewReader(srcData)}
+
+ if _, err := io.Copy(wrapped, src); err != nil {
+ t.Errorf("%s: Copy() err = %v", name, err)
+ }
+
+ if got := tt.responseWriter.Written(); got != srcData {
+ t.Errorf("%s: data = %q, want %q", name, got, srcData)
+ }
+
+ if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom {
+ if tt.wantReadFrom {
+ t.Errorf("%s: ReadFrom() should have been called", name)
+ } else {
+ t.Errorf("%s: ReadFrom() should not have been called", name)
+ }
+ }
+ })
+ }
+}
+
+func TestResponseWriterWrapperUnwrap(t *testing.T) {
+ w := &ResponseWriterWrapper{&baseRespWriter{}}
+
+ if _, ok := w.Unwrap().(*baseRespWriter); !ok {
+ t.Errorf("Unwrap() doesn't return the underlying ResponseWriter")
+ }
+}
+
+func TestResponseRecorderReadFrom(t *testing.T) {
+ tests := map[string]struct {
+ responseWriter responseWriterSpy
+ shouldBuffer bool
+ wantReadFrom bool
+ }{
+ "buffered plain": {
+ responseWriter: &baseRespWriter{},
+ shouldBuffer: true,
+ wantReadFrom: false,
+ },
+ "streamed plain": {
+ responseWriter: &baseRespWriter{},
+ shouldBuffer: false,
+ wantReadFrom: false,
+ },
+ "buffered ReadFrom": {
+ responseWriter: &readFromRespWriter{},
+ shouldBuffer: true,
+ wantReadFrom: false,
+ },
+ "streamed ReadFrom": {
+ responseWriter: &readFromRespWriter{},
+ shouldBuffer: false,
+ wantReadFrom: true,
+ },
+ }
+ for name, tt := range tests {
+ t.Run(name, func(t *testing.T) {
+ var buf bytes.Buffer
+
+ rr := NewResponseRecorder(tt.responseWriter, &buf, func(status int, header http.Header) bool {
+ return tt.shouldBuffer
+ })
+
+ const srcData = "boo!"
+ // hides everything but Read, since strings.Reader implements WriteTo it would
+ // take precedence over our ReadFrom.
+ src := struct{ io.Reader }{strings.NewReader(srcData)}
+
+ if _, err := io.Copy(rr, src); err != nil {
+ t.Errorf("Copy() err = %v", err)
+ }
+
+ wantStreamed := srcData
+ wantBuffered := ""
+ if tt.shouldBuffer {
+ wantStreamed = ""
+ wantBuffered = srcData
+ }
+
+ if got := tt.responseWriter.Written(); got != wantStreamed {
+ t.Errorf("streamed data = %q, want %q", got, wantStreamed)
+ }
+ if got := buf.String(); got != wantBuffered {
+ t.Errorf("buffered data = %q, want %q", got, wantBuffered)
+ }
+
+ if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom {
+ if tt.wantReadFrom {
+ t.Errorf("ReadFrom() should have been called")
+ } else {
+ t.Errorf("ReadFrom() should not have been called")
+ }
+ }
+ })
+ }
+}
diff --git a/modules/caddyhttp/reverseproxy/addresses.go b/modules/caddyhttp/reverseproxy/addresses.go
new file mode 100644
index 00000000000..31f4aeb3502
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/addresses.go
@@ -0,0 +1,151 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reverseproxy
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+type parsedAddr struct {
+ network, scheme, host, port string
+ valid bool
+}
+
+func (p parsedAddr) dialAddr() string {
+ if !p.valid {
+ return ""
+ }
+ // for simplest possible config, we only need to include
+ // the network portion if the user specified one
+ if p.network != "" {
+ return caddy.JoinNetworkAddress(p.network, p.host, p.port)
+ }
+
+ // if the host is a placeholder, then we don't want to join with an empty port,
+ // because that would just append an extra ':' at the end of the address.
+ if p.port == "" && strings.Contains(p.host, "{") {
+ return p.host
+ }
+ return net.JoinHostPort(p.host, p.port)
+}
+
+func (p parsedAddr) rangedPort() bool {
+ return strings.Contains(p.port, "-")
+}
+
+func (p parsedAddr) replaceablePort() bool {
+ return strings.Contains(p.port, "{") && strings.Contains(p.port, "}")
+}
+
+func (p parsedAddr) isUnix() bool {
+ return caddy.IsUnixNetwork(p.network)
+}
+
+// parseUpstreamDialAddress parses configuration inputs for
+// the dial address, including support for a scheme in front
+// as a shortcut for the port number, and a network type,
+// for example 'unix' to dial a unix socket.
+func parseUpstreamDialAddress(upstreamAddr string) (parsedAddr, error) {
+ var network, scheme, host, port string
+
+ if strings.Contains(upstreamAddr, "://") {
+ // we get a parsing error if a placeholder is specified
+ // so we return a more user-friendly error message instead
+ // to explain what to do instead
+ if strings.Contains(upstreamAddr, "{") {
+ return parsedAddr{}, fmt.Errorf("due to parsing difficulties, placeholders are not allowed when an upstream address contains a scheme")
+ }
+
+ toURL, err := url.Parse(upstreamAddr)
+ if err != nil {
+ // if the error seems to be due to a port range,
+ // try to replace the port range with a dummy
+ // single port so that url.Parse() will succeed
+ if strings.Contains(err.Error(), "invalid port") && strings.Contains(err.Error(), "-") {
+ index := strings.LastIndex(upstreamAddr, ":")
+ if index == -1 {
+ return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
+ }
+ portRange := upstreamAddr[index+1:]
+ if strings.Count(portRange, "-") != 1 {
+ return parsedAddr{}, fmt.Errorf("parsing upstream URL: parse \"%v\": port range invalid: %v", upstreamAddr, portRange)
+ }
+ toURL, err = url.Parse(strings.ReplaceAll(upstreamAddr, portRange, "0"))
+ if err != nil {
+ return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
+ }
+ port = portRange
+ } else {
+ return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
+ }
+ }
+ if port == "" {
+ port = toURL.Port()
+ }
+
+ // there is currently no way to perform a URL rewrite between choosing
+ // a backend and proxying to it, so we cannot allow extra components
+ // in backend URLs
+ if toURL.Path != "" || toURL.RawQuery != "" || toURL.Fragment != "" {
+ return parsedAddr{}, fmt.Errorf("for now, URLs for proxy upstreams only support scheme, host, and port components")
+ }
+
+ // ensure the port and scheme aren't in conflict
+ if toURL.Scheme == "http" && port == "443" {
+ return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (http://) and port (:443, the HTTPS port)")
+ }
+ if toURL.Scheme == "https" && port == "80" {
+ return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (https://) and port (:80, the HTTP port)")
+ }
+ if toURL.Scheme == "h2c" && port == "443" {
+ return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (h2c://) and port (:443, the HTTPS port)")
+ }
+
+ // if port is missing, attempt to infer from scheme
+ if port == "" {
+ switch toURL.Scheme {
+ case "", "http", "h2c":
+ port = "80"
+ case "https":
+ port = "443"
+ }
+ }
+
+ scheme, host = toURL.Scheme, toURL.Hostname()
+ } else {
+ var err error
+ network, host, port, err = caddy.SplitNetworkAddress(upstreamAddr)
+ if err != nil {
+ host = upstreamAddr
+ }
+ // we can assume a port if only a hostname is specified, but use of a
+ // placeholder without a port likely means a port will be filled in
+ if port == "" && !strings.Contains(host, "{") && !caddy.IsUnixNetwork(network) && !caddy.IsFdNetwork(network) {
+ port = "80"
+ }
+ }
+
+ // special case network to support both unix and h2c at the same time
+ if network == "unix+h2c" {
+ network = "unix"
+ scheme = "h2c"
+ }
+ return parsedAddr{network, scheme, host, port, true}, nil
+}
diff --git a/modules/caddyhttp/reverseproxy/addresses_test.go b/modules/caddyhttp/reverseproxy/addresses_test.go
new file mode 100644
index 00000000000..0c514194290
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/addresses_test.go
@@ -0,0 +1,282 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reverseproxy
+
+import "testing"
+
+func TestParseUpstreamDialAddress(t *testing.T) {
+ for i, tc := range []struct {
+ input string
+ expectHostPort string
+ expectScheme string
+ expectErr bool
+ }{
+ {
+ input: "foo",
+ expectHostPort: "foo:80",
+ },
+ {
+ input: "foo:1234",
+ expectHostPort: "foo:1234",
+ },
+ {
+ input: "127.0.0.1",
+ expectHostPort: "127.0.0.1:80",
+ },
+ {
+ input: "127.0.0.1:1234",
+ expectHostPort: "127.0.0.1:1234",
+ },
+ {
+ input: "[::1]",
+ expectHostPort: "[::1]:80",
+ },
+ {
+ input: "[::1]:1234",
+ expectHostPort: "[::1]:1234",
+ },
+ {
+ input: "{foo}",
+ expectHostPort: "{foo}",
+ },
+ {
+ input: "{foo}:80",
+ expectHostPort: "{foo}:80",
+ },
+ {
+ input: "{foo}:{bar}",
+ expectHostPort: "{foo}:{bar}",
+ },
+ {
+ input: "http://foo",
+ expectHostPort: "foo:80",
+ expectScheme: "http",
+ },
+ {
+ input: "http://foo:1234",
+ expectHostPort: "foo:1234",
+ expectScheme: "http",
+ },
+ {
+ input: "http://127.0.0.1",
+ expectHostPort: "127.0.0.1:80",
+ expectScheme: "http",
+ },
+ {
+ input: "http://127.0.0.1:1234",
+ expectHostPort: "127.0.0.1:1234",
+ expectScheme: "http",
+ },
+ {
+ input: "http://[::1]",
+ expectHostPort: "[::1]:80",
+ expectScheme: "http",
+ },
+ {
+ input: "http://[::1]:80",
+ expectHostPort: "[::1]:80",
+ expectScheme: "http",
+ },
+ {
+ input: "https://foo",
+ expectHostPort: "foo:443",
+ expectScheme: "https",
+ },
+ {
+ input: "https://foo:1234",
+ expectHostPort: "foo:1234",
+ expectScheme: "https",
+ },
+ {
+ input: "https://127.0.0.1",
+ expectHostPort: "127.0.0.1:443",
+ expectScheme: "https",
+ },
+ {
+ input: "https://127.0.0.1:1234",
+ expectHostPort: "127.0.0.1:1234",
+ expectScheme: "https",
+ },
+ {
+ input: "https://[::1]",
+ expectHostPort: "[::1]:443",
+ expectScheme: "https",
+ },
+ {
+ input: "https://[::1]:1234",
+ expectHostPort: "[::1]:1234",
+ expectScheme: "https",
+ },
+ {
+ input: "h2c://foo",
+ expectHostPort: "foo:80",
+ expectScheme: "h2c",
+ },
+ {
+ input: "h2c://foo:1234",
+ expectHostPort: "foo:1234",
+ expectScheme: "h2c",
+ },
+ {
+ input: "h2c://127.0.0.1",
+ expectHostPort: "127.0.0.1:80",
+ expectScheme: "h2c",
+ },
+ {
+ input: "h2c://127.0.0.1:1234",
+ expectHostPort: "127.0.0.1:1234",
+ expectScheme: "h2c",
+ },
+ {
+ input: "h2c://[::1]",
+ expectHostPort: "[::1]:80",
+ expectScheme: "h2c",
+ },
+ {
+ input: "h2c://[::1]:1234",
+ expectHostPort: "[::1]:1234",
+ expectScheme: "h2c",
+ },
+ {
+ input: "localhost:1001-1009",
+ expectHostPort: "localhost:1001-1009",
+ },
+ {
+ input: "{host}:1001-1009",
+ expectHostPort: "{host}:1001-1009",
+ },
+ {
+ input: "http://localhost:1001-1009",
+ expectHostPort: "localhost:1001-1009",
+ expectScheme: "http",
+ },
+ {
+ input: "https://localhost:1001-1009",
+ expectHostPort: "localhost:1001-1009",
+ expectScheme: "https",
+ },
+ {
+ input: "unix//var/php.sock",
+ expectHostPort: "unix//var/php.sock",
+ },
+ {
+ input: "unix+h2c//var/grpc.sock",
+ expectHostPort: "unix//var/grpc.sock",
+ expectScheme: "h2c",
+ },
+ {
+ input: "unix/{foo}",
+ expectHostPort: "unix/{foo}",
+ },
+ {
+ input: "unix+h2c/{foo}",
+ expectHostPort: "unix/{foo}",
+ expectScheme: "h2c",
+ },
+ {
+ input: "unix//foo/{foo}/bar",
+ expectHostPort: "unix//foo/{foo}/bar",
+ },
+ {
+ input: "unix+h2c//foo/{foo}/bar",
+ expectHostPort: "unix//foo/{foo}/bar",
+ expectScheme: "h2c",
+ },
+ {
+ input: "http://{foo}",
+ expectErr: true,
+ },
+ {
+ input: "http:// :80",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost/path",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost?key=value",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost#fragment",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost:8001-8002-8003",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost:8001-8002/foo:bar",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost:8001-8002/foo:1",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost:8001-8002/foo:1-2",
+ expectErr: true,
+ },
+ {
+ input: "http://localhost:8001-8002#foo:1",
+ expectErr: true,
+ },
+ {
+ input: "http://foo:443",
+ expectErr: true,
+ },
+ {
+ input: "https://foo:80",
+ expectErr: true,
+ },
+ {
+ input: "h2c://foo:443",
+ expectErr: true,
+ },
+ {
+ input: `unix/c:\absolute\path`,
+ expectHostPort: `unix/c:\absolute\path`,
+ },
+ {
+ input: `unix+h2c/c:\absolute\path`,
+ expectHostPort: `unix/c:\absolute\path`,
+ expectScheme: "h2c",
+ },
+ {
+ input: "unix/c:/absolute/path",
+ expectHostPort: "unix/c:/absolute/path",
+ },
+ {
+ input: "unix+h2c/c:/absolute/path",
+ expectHostPort: "unix/c:/absolute/path",
+ expectScheme: "h2c",
+ },
+ } {
+ actualAddr, err := parseUpstreamDialAddress(tc.input)
+ if tc.expectErr && err == nil {
+ t.Errorf("Test %d: Expected error but got %v", i, err)
+ }
+ if !tc.expectErr && err != nil {
+ t.Errorf("Test %d: Expected no error but got %v", i, err)
+ }
+ if actualAddr.dialAddr() != tc.expectHostPort {
+ t.Errorf("Test %d: input %s: Expected host and port '%s' but got '%s'", i, tc.input, tc.expectHostPort, actualAddr.dialAddr())
+ }
+ if actualAddr.scheme != tc.expectScheme {
+ t.Errorf("Test %d: Expected scheme '%s' but got '%s'", i, tc.expectScheme, actualAddr.scheme)
+ }
+ }
+}
diff --git a/modules/caddyhttp/reverseproxy/admin.go b/modules/caddyhttp/reverseproxy/admin.go
new file mode 100644
index 00000000000..7e72a4cdb51
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/admin.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reverseproxy
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/caddyserver/caddy/v2"
+)
+
+// init registers this admin API module with Caddy at program start.
+func init() {
+	caddy.RegisterModule(adminUpstreams{})
+}
+
+// adminUpstreams is a module that provides the
+// /reverse_proxy/upstreams endpoint for the Caddy admin
+// API. This allows for checking the health of configured
+// reverse proxy upstreams in the pool.
+type adminUpstreams struct{}
+
+// upstreamStatus holds the status of a particular upstream;
+// this is the JSON shape emitted by the admin endpoint.
+type upstreamStatus struct {
+	Address     string `json:"address"`      // upstream dial address (the key of the shared hosts map)
+	NumRequests int    `json:"num_requests"` // from Host.NumRequests(); presumably in-flight requests — confirm in host type
+	Fails       int    `json:"fails"`        // from Host.Fails(); presumably passive health-check failure count — confirm in host type
+}
+
+// CaddyModule returns the Caddy module information.
+func (adminUpstreams) CaddyModule() caddy.ModuleInfo {
+	return caddy.ModuleInfo{
+		// The admin.api.* namespace exposes this module's Routes on the admin endpoint.
+		ID:  "admin.api.reverse_proxy",
+		New: func() caddy.Module { return new(adminUpstreams) },
+	}
+}
+
+// Routes returns a route for the /reverse_proxy/upstreams endpoint,
+// served by handleUpstreams below.
+func (al adminUpstreams) Routes() []caddy.AdminRoute {
+	return []caddy.AdminRoute{
+		{
+			Pattern: "/reverse_proxy/upstreams",
+			Handler: caddy.AdminHandlerFunc(al.handleUpstreams),
+		},
+	}
+}
+
+// handleUpstreams reports the status of the reverse proxy
+// upstream pool as a JSON array of upstreamStatus objects.
+func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) error {
+	// This endpoint is read-only; only GET is supported.
+	if r.Method != http.MethodGet {
+		return caddy.APIError{
+			HTTPStatus: http.StatusMethodNotAllowed,
+			Err:        fmt.Errorf("method not allowed"),
+		}
+	}
+
+	// The response body is always JSON.
+	w.Header().Set("Content-Type", "application/json")
+
+	// Snapshot the shared upstream pool. The callback must stay fast,
+	// since Range walks the live map used by the proxy.
+	statuses := []upstreamStatus{}
+	var walkErr error
+	hosts.Range(func(key, val any) bool {
+		addr, keyOk := key.(string)
+		if !keyOk {
+			walkErr = caddy.APIError{
+				HTTPStatus: http.StatusInternalServerError,
+				Err:        fmt.Errorf("could not type assert upstream address"),
+			}
+			return false
+		}
+
+		host, valOk := val.(*Host)
+		if !valOk {
+			walkErr = caddy.APIError{
+				HTTPStatus: http.StatusInternalServerError,
+				Err:        fmt.Errorf("could not type assert upstream struct"),
+			}
+			return false
+		}
+
+		statuses = append(statuses, upstreamStatus{
+			Address:     addr,
+			NumRequests: host.NumRequests(),
+			Fails:       host.Fails(),
+		})
+		return true
+	})
+
+	// A type-assertion failure during the walk aborts the response.
+	if walkErr != nil {
+		return walkErr
+	}
+
+	// Stream the collected statuses to the client.
+	if err := json.NewEncoder(w).Encode(statuses); err != nil {
+		return caddy.APIError{
+			HTTPStatus: http.StatusInternalServerError,
+			Err:        err,
+		}
+	}
+
+	return nil
+}
diff --git a/modules/caddyhttp/reverseproxy/ascii.go b/modules/caddyhttp/reverseproxy/ascii.go
new file mode 100644
index 00000000000..75b8220f353
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/ascii.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Most of the code in this file was initially borrowed from the Go
+// standard library and modified; It had this copyright notice:
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Original source, copied because the package was marked internal:
+// https://github.com/golang/go/blob/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a/src/net/http/internal/ascii/print.go
+
+package reverseproxy
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	// Compare byte-by-byte, folding only the ASCII uppercase range.
+	// (A rune-wise range loop would skip UTF-8 continuation bytes,
+	// so we deliberately index every byte.)
+	for i := 0; i < len(s); i++ {
+		a, b := s[i], t[i]
+		if 'A' <= a && a <= 'Z' {
+			a += 'a' - 'A'
+		}
+		if 'A' <= b && b <= 'Z' {
+			b += 'a' - 'A'
+		}
+		if a != b {
+			return false
+		}
+	}
+	return true
+}
+
+// asciiLower returns the ASCII lowercase version of b.
+// Bytes outside 'A'..'Z' are returned unchanged.
+func asciiLower(b byte) byte {
+	if b < 'A' || b > 'Z' {
+		return b
+	}
+	return b + ('a' - 'A')
+}
+
+// asciiIsPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func asciiIsPrint(s string) bool {
+	// Printable ASCII is exactly the byte range ' ' (0x20) .. '~' (0x7E);
+	// any byte outside it — including UTF-8 continuation bytes — fails.
+	for _, c := range []byte(s) {
+		if c < ' ' || c > '~' {
+			return false
+		}
+	}
+	return true
+}
diff --git a/modules/caddyhttp/reverseproxy/ascii_test.go b/modules/caddyhttp/reverseproxy/ascii_test.go
new file mode 100644
index 00000000000..de67963bd7c
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/ascii_test.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Most of the code in this file was initially borrowed from the Go
+// standard library and modified; It had this copyright notice:
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Original source, copied because the package was marked internal:
+// https://github.com/golang/go/blob/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a/src/net/http/internal/ascii/print_test.go
+
+package reverseproxy
+
+import "testing"
+
+// TestEqualFold exercises asciiEqualFold over matching, mismatching,
+// and non-ASCII inputs via a compact table.
+func TestEqualFold(t *testing.T) {
+	cases := []struct {
+		name string
+		a, b string
+		want bool
+	}{
+		{name: "empty", want: true},
+		{name: "simple match", a: "CHUNKED", b: "chunked", want: true},
+		{name: "same string", a: "chunked", b: "chunked", want: true},
+		// The "K" below is 'KELVIN SIGN' (\u212A), which must not fold.
+		{name: "Unicode Kelvin symbol", a: "chun\u212Aed", b: "chunked", want: false},
+	}
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			got := asciiEqualFold(tc.a, tc.b)
+			if got != tc.want {
+				t.Errorf("AsciiEqualFold(%q,%q): got %v want %v", tc.a, tc.b, got, tc.want)
+			}
+		})
+	}
+}
+
+// TestIsPrint exercises asciiIsPrint over printable ASCII, control
+// bytes at both range boundaries, and non-ASCII runes.
+func TestIsPrint(t *testing.T) {
+	cases := []struct {
+		name string
+		in   string
+		want bool
+	}{
+		{name: "empty", want: true},
+		{name: "ASCII low", in: "This is a space: ' '", want: true},
+		{name: "ASCII high", in: "This is a tilde: '~'", want: true},
+		{name: "ASCII low non-print", in: "This is a unit separator: \x1F", want: false},
+		{name: "Ascii high non-print", in: "This is a Delete: \x7F", want: false},
+		// The "K" below is 'KELVIN SIGN' (\u212A).
+		{name: "Unicode letter", in: "Today it's 280\u212A outside: it's freezing!", want: false},
+		{name: "Unicode emoji", in: "Gophers like 🧀", want: false},
+	}
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if got := asciiIsPrint(tc.in); got != tc.want {
+				t.Errorf("IsASCIIPrint(%q): got %v want %v", tc.in, got, tc.want)
+			}
+		})
+	}
+}
diff --git a/modules/caddyhttp/reverseproxy/caddyfile.go b/modules/caddyhttp/reverseproxy/caddyfile.go
new file mode 100644
index 00000000000..ab1dcdd029c
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/caddyfile.go
@@ -0,0 +1,1673 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reverseproxy
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/dustin/go-humanize"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
+ "github.com/caddyserver/caddy/v2/internal"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+// init registers the Caddyfile directives implemented in this file:
+// reverse_proxy, copy_response, and copy_response_headers.
+func init() {
+	httpcaddyfile.RegisterHandlerDirective("reverse_proxy", parseCaddyfile)
+	httpcaddyfile.RegisterHandlerDirective("copy_response", parseCopyResponseCaddyfile)
+	httpcaddyfile.RegisterHandlerDirective("copy_response_headers", parseCopyResponseHeadersCaddyfile)
+}
+
+// parseCaddyfile builds a reverse_proxy Handler from Caddyfile tokens.
+// It first unmarshals the directive's tokens into a fresh Handler, then
+// runs the finalization pass, which needs the full helper rather than
+// just the dispenser.
+func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
+	rp := new(Handler)
+	if err := rp.UnmarshalCaddyfile(h.Dispenser); err != nil {
+		return nil, err
+	}
+	if err := rp.FinalizeUnmarshalCaddyfile(h); err != nil {
+		return nil, err
+	}
+	return rp, nil
+}
+
+// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
+//
+// reverse_proxy [] [] {
+// # backends
+// to
+// dynamic [...]
+//
+// # load balancing
+// lb_policy []
+// lb_retries
+// lb_try_duration
+// lb_try_interval
+// lb_retry_match
+//
+// # active health checking
+// health_uri
+// health_port
+// health_interval
+// health_passes
+// health_fails
+// health_timeout
+// health_status
+// health_body
+// health_method
+// health_request_body
+// health_follow_redirects
+// health_headers {
+// []
+// }
+//
+// # passive health checking
+// fail_duration
+// max_fails
+// unhealthy_status
+// unhealthy_latency
+// unhealthy_request_count
+//
+// # streaming
+// flush_interval
+// request_buffers
+// response_buffers
+// stream_timeout
+// stream_close_delay
+// verbose_logs
+//
+// # request manipulation
+// trusted_proxies [private_ranges]
+// header_up [+|-] [ []]
+// header_down [+|-] [ []]
+// method
+// rewrite
+//
+// # round trip
+// transport {
+// ...
+// }
+//
+// # optionally intercept responses from upstream
+// @name {
+// status
+// header []
+// }
+// replace_status []
+// handle_response [