23 Commits (v2.0 ... v2.3)

- 38938e884d (Moritz Marquardt, 2021-12-02 20:35:43 +01:00)
  Merge pull request 'Add redirect for GitHub-style non-".html" paths & force remove index.html suffix' (#13) from feature/github-style-nohtml-paths into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/13
- 57dce3b0c5 (Moritz Marquardt, 2021-12-02 20:35:43 +01:00)
  Add redirect for GitHub-style non-".html" paths & force remove index.html suffix
  See https://codeberg.org/Codeberg/Community/issues/547 for more info
- 026a04e57e (Moritz Marquardt, 2021-12-02 20:35:33 +01:00)
  Merge pull request 'Change browser cache to 10 minutes to make bigger pages more performant' (#14) from feature/browser-side-caching into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/14
- b6d7f5a6ee (Moritz Marquardt, 2021-12-02 20:35:33 +01:00)
  Change browser cache to 10 minutes to make bigger pages more performant
- 726d8321e8 (Moritz Marquardt, 2021-12-02 20:35:22 +01:00)
  Merge pull request 'Fix (half) empty cache issue' (#17) from bugfix/large-files-are-empty into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/17
- 989d00832f (Moritz Marquardt, 2021-12-02 19:11:13 +01:00)
  Fix (half) empty cache issue
- 26dd1591f6 (Moritz Marquardt, 2021-12-02 00:00:00 +01:00)
  Add www redirect for main domain
- a48ba8ee49 (Moritz Marquardt, 2021-12-01 22:59:52 +01:00)
  Run gofmt
- fedfa8def4 (Moritz Marquardt, 2021-12-01 22:59:38 +01:00)
  Remove REDIRECT_BROKEN_DNS page and instead use "Failed Dependency" error
- 6c85b8a166 (Moritz Marquardt, 2021-12-01 22:53:33 +01:00)
  Merge pull request 'Handle certificate errors with mock certificates (fixes #10)' (#12) from feature/certificate-error-handling into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/12
- 544b3f7321 (Moritz Marquardt, 2021-12-01 22:49:48 +01:00)
  (Ab)use CSR field to store try-again date for renewals (instead of showing a mock cert); must be tested when the first renewals are due
- f29ebc57d3 (Moritz Marquardt, 2021-12-01 21:46:52 +01:00)
  Handle certificate errors with mock certificates (fixes #10)
- 5b6e3748b4 (Moritz Marquardt, 2021-12-01 21:46:28 +01:00)
  Merge pull request 'Pass Gitea API token to requests' (#9) from fnetX/codeberg-pages-server:main into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/9
- 6d520c2a40 (Moritz Marquardt, 2021-12-01 21:44:54 +01:00)
  Update error message for private repo access
- 73da80adc1 (fnetx, 2021-11-26 17:10:31 +01:00)
  Switch GiteaApiToken from byte to string
- 455f65216c (fnetx, 2021-11-26 17:03:58 +01:00)
  Remove access token from canonicalLink as per momar
- 772c17e214 (fnetx, 2021-11-26 04:19:24 +01:00)
  Pass Gitea API token to requests
  This allows displaying repos that aren't fully public. Some users seem to be very interested in not having their pages viewable, and it might even make sense to keep e.g. search engines from reading them. If set to some random user's token, this allows setting the visibility at least to limited (so only logged-in users see the repo), and it should allow viewing private repos in the future with another API token.
- 418dbb7315 (Moritz Marquardt, 2021-11-25 19:16:20 +01:00)
  Merge pull request 'Refactor Code' (#8) from 6543/codeberg-pages:refactor into main
  Reviewed-on: https://codeberg.org/Codeberg/pages-server/pulls/8
- a6da3eb5f0 (6543, 2021-11-25 16:19:04 +01:00)
  ignore vendor/ folder
- f2bb6aa36f (6543, 2021-11-25 16:18:28 +01:00)
  fix comment
- e800d2110e (6543, 2021-11-25 16:12:28 +01:00)
  gofmt -s -w *.go */*.go
- 5ed8d0f129 (Moritz Marquardt, 2021-11-24 19:09:37 +01:00)
  Add clarification on cache & reduce default branch cache to 15 minutes
- e5385be6da (Moritz Marquardt, 2021-11-24 19:08:34 +01:00)
  Fix iterator issue causing 100% CPU load
9 changed files with 360 additions and 185 deletions

.gitignore (vendored, 1 line changed)

@@ -3,3 +3,4 @@
 key-database.pogreb/
 acme-account.json
 build/
+vendor/

Configuration documentation (file name not shown):

@@ -4,7 +4,7 @@
 - `PAGES_DOMAIN` (default: `codeberg.page`): main domain for pages.
 - `RAW_DOMAIN` (default: `raw.codeberg.org`): domain for raw resources.
 - `GITEA_ROOT` (default: `https://codeberg.org`): root of the upstream Gitea instance.
-- `REDIRECT_BROKEN_DNS` (default: https://docs.codeberg.org/pages/custom-domains/): info page for setting up DNS, shown for invalid DNS setups.
+- `GITEA_API_TOKEN` (default: empty): API token for the Gitea instance to access non-public (e.g. limited) repos.
 - `REDIRECT_RAW_INFO` (default: https://docs.codeberg.org/pages/raw-content/): info page for raw resources, shown if no resource is provided.
 - `ACME_API` (default: https://acme-v02.api.letsencrypt.org/directory): set this to https://acme.mock.director to use invalid certificates without any verification (great for debugging).
   ZeroSSL might be better in the future as it doesn't have rate limits and doesn't clash with the official Codeberg certificates (which are using Let's Encrypt), but I couldn't get it to work yet.
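All of these settings are read from the environment through the project's envOr helper, which is referenced throughout the diff (for example in main.go below) but whose implementation is not part of this changeset. As a point of orientation only, here is a minimal sketch of the usual shape of such a helper; the repository's actual version may differ in details:

package main

import "os"

// envOr returns the value of the environment variable key, or fallback when
// the variable is unset. Illustrative sketch only; not code from this diff.
func envOr(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}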

Certificate / ACME handling code (file name not shown):

@@ -6,21 +6,25 @@ import (
 "crypto/ecdsa"
 "crypto/elliptic"
 "crypto/rand"
+"crypto/rsa"
 "crypto/tls"
 "crypto/x509"
+"crypto/x509/pkix"
 "encoding/gob"
 "encoding/json"
+"encoding/pem"
 "errors"
 "github.com/OrlovEvgeny/go-mcache"
 "github.com/akrylysov/pogreb/fs"
 "github.com/go-acme/lego/v4/certificate"
 "github.com/go-acme/lego/v4/challenge"
-"github.com/go-acme/lego/v4/challenge/resolver"
 "github.com/go-acme/lego/v4/challenge/tlsalpn01"
 "github.com/go-acme/lego/v4/providers/dns"
 "io/ioutil"
 "log"
+"math/big"
 "os"
+"strconv"
 "strings"
 "sync"
 "time"
@@ -101,7 +105,7 @@ var tlsConfig = &tls.Config{
 }
 }
-err = keyCache.Set(sni, &tlsCertificate, 15 * time.Minute)
+err = keyCache.Set(sni, &tlsCertificate, 15*time.Minute)
 if err != nil {
 panic(err)
 }
@@ -127,13 +131,17 @@
 }
 var keyCache = mcache.New()
-var keyDatabase *pogreb.DB
+var keyDatabase, keyDatabaseErr = pogreb.Open("key-database.pogreb", &pogreb.Options{
+BackgroundSyncInterval: 30 * time.Second,
+BackgroundCompactionInterval: 6 * time.Hour,
+FileSystem: fs.OSMMap,
+})
-func CheckUserLimit(user string) (error) {
+func CheckUserLimit(user string) error {
 userLimit, ok := acmeClientCertificateLimitPerUser[user]
 if !ok {
 // Each Codeberg user can only add 10 new domains per day.
-userLimit = equalizer.NewTokenBucket(10, time.Hour * 24)
+userLimit = equalizer.NewTokenBucket(10, time.Hour*24)
 acmeClientCertificateLimitPerUser[user] = userLimit
 }
 if !userLimit.Ask() {
@@ -149,8 +157,9 @@ type AcmeAccount struct {
 Email string
 Registration *registration.Resource
 Key crypto.PrivateKey `json:"-"`
 KeyPEM string `json:"Key"`
 }
 func (u *AcmeAccount) GetEmail() string {
 return u.Email
 }
@@ -161,31 +170,22 @@ func (u *AcmeAccount) GetPrivateKey() crypto.PrivateKey {
 return u.Key
 }
-func newAcmeClient(configureChallenge func(*resolver.SolverManager) error) *lego.Client {
-acmeClient, err := lego.NewClient(myAcmeConfig)
-if err != nil {
-panic(err)
-}
-err = configureChallenge(acmeClient.Challenge)
-if err != nil {
-panic(err)
-}
-return acmeClient
-}
 var acmeClient, mainDomainAcmeClient *lego.Client
 var acmeClientCertificateLimitPerUser = map[string]*equalizer.TokenBucket{}
 // rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
 // TODO: when this is used a lot, we probably have to think of a somewhat better solution?
-var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15 * time.Minute)
+var acmeClientOrderLimit = equalizer.NewTokenBucket(25, 15*time.Minute)
-// rate limit is 20 / second, we want 10 / second
+// rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
-var acmeClientRequestLimit = equalizer.NewTokenBucket(10, 1 * time.Second)
+var acmeClientRequestLimit = equalizer.NewTokenBucket(5, 1*time.Second)
 var challengeCache = mcache.New()
 type AcmeTLSChallengeProvider struct{}
 var _ challenge.Provider = AcmeTLSChallengeProvider{}
 func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
 return challengeCache.Set(domain, keyAuth, 1*time.Hour)
 }
@@ -193,10 +193,13 @@ func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
 challengeCache.Remove(domain)
 return nil
 }
 type AcmeHTTPChallengeProvider struct{}
 var _ challenge.Provider = AcmeHTTPChallengeProvider{}
 func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
-return challengeCache.Set(domain + "/" + token, keyAuth, 1*time.Hour)
+return challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
 }
 func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
 challengeCache.Remove(domain + "/" + token)
@@ -205,21 +208,9 @@ func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
 func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 // parse certificate from database
-resBytes, err := keyDatabase.Get(sni)
-if err != nil {
-// key database is not working
-panic(err)
-}
-if resBytes == nil {
-return tls.Certificate{}, false
-}
-resGob := bytes.NewBuffer(resBytes)
-resDec := gob.NewDecoder(resGob)
 res := &certificate.Resource{}
-err = resDec.Decode(res)
-if err != nil {
-panic(err)
+if !PogrebGet(keyDatabase, sni, res) {
+return tls.Certificate{}, false
 }
 tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
@@ -235,7 +226,15 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 // renew certificates 7 days before they expire
 if !tlsCertificate.Leaf.NotAfter.After(time.Now().Add(-7 * 24 * time.Hour)) {
+if res.CSR != nil && len(res.CSR) > 0 {
+// CSR stores the time when the renewal shall be tried again
+nextTryUnix, err := strconv.ParseInt(string(res.CSR), 10, 64)
+if err == nil && time.Now().Before(time.Unix(nextTryUnix, 0)) {
+return tlsCertificate, true
+}
+}
 go (func() {
+res.CSR = nil // acme client doesn't like CSR to be set
 tlsCertificate, err = obtainCert(acmeClient, []string{string(sni)}, res, "")
 if err != nil {
 log.Printf("Couldn't renew certificate for %s: %s", sni, err)
@@ -248,6 +247,7 @@ func retrieveCertFromDB(sni []byte) (tls.Certificate, bool) {
 }
 var obtainLocks = sync.Map{}
 func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string) (tls.Certificate, error) {
 name := strings.TrimPrefix(domains[0], "*")
 if os.Getenv("DNS_PROVIDER") == "" && len(domains[0]) > 0 && domains[0][0] == '*' {
@@ -269,16 +269,25 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 }
 defer obtainLocks.Delete(name)
+if acmeClient == nil {
+return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!"), nil
+}
 // request actual cert
 var res *certificate.Resource
 var err error
-if renew != nil {
+if renew != nil && renew.CertURL != "" {
 if os.Getenv("ACME_USE_RATE_LIMITS") != "false" {
 acmeClientRequestLimit.Take()
 }
 log.Printf("Renewing certificate for %v", domains)
 res, err = acmeClient.Certificate.Renew(*renew, true, false, "")
-} else {
+if err != nil {
+log.Printf("Couldn't renew certificate for %v, trying to request a new one: %s", domains, err)
+res = nil
+}
+}
+if res == nil {
 if user != "" {
 if err := CheckUserLimit(user); err != nil {
 return tls.Certificate{}, err
@@ -298,23 +307,21 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 }
 if err != nil {
 log.Printf("Couldn't obtain certificate for %v: %s", domains, err)
-return tls.Certificate{}, err
+if renew != nil && renew.CertURL != "" {
+tlsCertificate, err := tls.X509KeyPair(renew.Certificate, renew.PrivateKey)
+if err == nil && tlsCertificate.Leaf.NotAfter.After(time.Now()) {
+// avoid sending a mock cert instead of a still valid cert, instead abuse CSR field to store time to try again at
+renew.CSR = []byte(strconv.FormatInt(time.Now().Add(6*time.Hour).Unix(), 10))
+PogrebPut(keyDatabase, []byte(name), renew)
+return tlsCertificate, nil
+}
+} else {
+return mockCert(domains[0], err.Error()), err
+}
 }
 log.Printf("Obtained certificate for %v", domains)
-var resGob bytes.Buffer
-resEnc := gob.NewEncoder(&resGob)
-err = resEnc.Encode(res)
-if err != nil {
-panic(err)
-}
-err = keyDatabase.Put([]byte(name), resGob.Bytes())
-if err != nil {
-_ = keyDatabase.Delete([]byte(name + "/key"))
-obtainLocks.Delete(name)
-panic(err)
-}
+PogrebPut(keyDatabase, []byte(name), res)
 tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
 if err != nil {
 return tls.Certificate{}, err
@@ -322,21 +329,88 @@ func obtainCert(acmeClient *lego.Client, domains []string, renew *certificate.Re
 return tlsCertificate, nil
 }
-func setupCertificates() {
-var err error
-keyDatabase, err = pogreb.Open("key-database.pogreb", &pogreb.Options{
-BackgroundSyncInterval: 30 * time.Second,
-BackgroundCompactionInterval: 6 * time.Hour,
-FileSystem: fs.OSMMap,
-})
+func mockCert(domain string, msg string) tls.Certificate {
+key, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048)
 if err != nil {
 panic(err)
 }
+template := x509.Certificate{
+SerialNumber: big.NewInt(1),
+Subject: pkix.Name{
+CommonName: domain,
+Organization: []string{"Codeberg Pages Error Certificate (couldn't obtain ACME certificate)"},
+OrganizationalUnit: []string{
+"Will not try again for 6 hours to avoid hitting rate limits for your domain.",
+"Check https://docs.codeberg.org/codeberg-pages/troubleshooting/ for troubleshooting tips, and feel " +
+"free to create an issue at https://codeberg.org/Codeberg/pages-server if you can't solve it.\n",
+"Error message: " + msg,
+},
+},
+// certificates younger than 7 days are renewed, so this enforces the cert to not be renewed for a 6 hours
+NotAfter: time.Now().Add(time.Hour*24*7 + time.Hour*6),
+NotBefore: time.Now(),
+KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+BasicConstraintsValid: true,
+}
+certBytes, err := x509.CreateCertificate(
+rand.Reader,
+&template,
+&template,
+&key.(*rsa.PrivateKey).PublicKey,
+key,
+)
+if err != nil {
+panic(err)
+}
+out := &bytes.Buffer{}
+err = pem.Encode(out, &pem.Block{
+Bytes: certBytes,
+Type: "CERTIFICATE",
+})
+if err != nil {
+panic(err)
+}
+outBytes := out.Bytes()
+res := &certificate.Resource{
+PrivateKey: certcrypto.PEMEncode(key),
+Certificate: outBytes,
+IssuerCertificate: outBytes,
+Domain: domain,
+}
+databaseName := domain
+if domain == "*"+string(MainDomainSuffix) || domain == string(MainDomainSuffix[1:]) {
+databaseName = string(MainDomainSuffix)
+}
+PogrebPut(keyDatabase, []byte(databaseName), res)
+tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
+if err != nil {
+panic(err)
+}
+return tlsCertificate
+}
+func setupCertificates() {
+if keyDatabaseErr != nil {
+panic(keyDatabaseErr)
+}
 if os.Getenv("ACME_ACCEPT_TERMS") != "true" || (os.Getenv("DNS_PROVIDER") == "" && os.Getenv("ACME_API") != "https://acme.mock.directory") {
 panic(errors.New("you must set ACME_ACCEPT_TERMS and DNS_PROVIDER, unless ACME_API is set to https://acme.mock.directory"))
 }
+// getting main cert before ACME account so that we can panic here on database failure without hitting rate limits
+mainCertBytes, err := keyDatabase.Get(MainDomainSuffix)
+if err != nil {
+// key database is not working
+panic(err)
+}
 if account, err := ioutil.ReadFile("acme-account.json"); err == nil {
 err = json.Unmarshal(account, &myAcmeAccount)
 if err != nil {
@@ -349,83 +423,107 @@ func setupCertificates() {
 myAcmeConfig = lego.NewConfig(&myAcmeAccount)
 myAcmeConfig.CADirURL = envOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
 myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
-newAcmeClient(func(manager *resolver.SolverManager) error { return nil })
+_, err := lego.NewClient(myAcmeConfig)
+if err != nil {
+log.Printf("[ERROR] Can't create ACME client, continuing with mock certs only: %s", err)
+}
 } else if os.IsNotExist(err) {
 privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
 if err != nil {
 panic(err)
 }
 myAcmeAccount = AcmeAccount{
 Email: envOr("ACME_EMAIL", "noreply@example.email"),
 Key: privateKey,
 KeyPEM: string(certcrypto.PEMEncode(privateKey)),
 }
 myAcmeConfig = lego.NewConfig(&myAcmeAccount)
 myAcmeConfig.CADirURL = envOr("ACME_API", "https://acme-v02.api.letsencrypt.org/directory")
 myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
-tempClient := newAcmeClient(func(manager *resolver.SolverManager) error { return nil })
-// accept terms & log in to EAB
-if os.Getenv("ACME_EAB_KID") == "" || os.Getenv("ACME_EAB_HMAC") == "" {
-reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: os.Getenv("ACME_ACCEPT_TERMS") == "true"})
-if err != nil {
-panic(err)
-}
-myAcmeAccount.Registration = reg
-} else {
-reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
-TermsOfServiceAgreed: os.Getenv("ACME_ACCEPT_TERMS") == "true",
-Kid: os.Getenv("ACME_EAB_KID"),
-HmacEncoded: os.Getenv("ACME_EAB_HMAC"),
-})
-if err != nil {
-panic(err)
-}
-myAcmeAccount.Registration = reg
-}
-acmeAccountJson, err := json.Marshal(myAcmeAccount)
-if err != nil {
-panic(err)
-}
-err = ioutil.WriteFile("acme-account.json", acmeAccountJson, 0600)
-if err != nil {
-panic(err)
-}
+tempClient, err := lego.NewClient(myAcmeConfig)
+if err != nil {
+log.Printf("[ERROR] Can't create ACME client, continuing with mock certs only: %s", err)
+} else {
+// accept terms & log in to EAB
+if os.Getenv("ACME_EAB_KID") == "" || os.Getenv("ACME_EAB_HMAC") == "" {
+reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: os.Getenv("ACME_ACCEPT_TERMS") == "true"})
+if err != nil {
+log.Printf("[ERROR] Can't register ACME account, continuing with mock certs only: %s", err)
+} else {
+myAcmeAccount.Registration = reg
+}
+} else {
+reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
+TermsOfServiceAgreed: os.Getenv("ACME_ACCEPT_TERMS") == "true",
+Kid: os.Getenv("ACME_EAB_KID"),
+HmacEncoded: os.Getenv("ACME_EAB_HMAC"),
+})
+if err != nil {
+log.Printf("[ERROR] Can't register ACME account, continuing with mock certs only: %s", err)
+} else {
+myAcmeAccount.Registration = reg
+}
+}
+if myAcmeAccount.Registration != nil {
+acmeAccountJson, err := json.Marshal(myAcmeAccount)
+if err != nil {
+log.Printf("[FAIL] Error during json.Marshal(myAcmeAccount), waiting for manual restart to avoid rate limits: %s", err)
+select {}
+}
+err = ioutil.WriteFile("acme-account.json", acmeAccountJson, 0600)
+if err != nil {
+log.Printf("[FAIL] Error during ioutil.WriteFile(\"acme-account.json\"), waiting for manual restart to avoid rate limits: %s", err)
+select {}
+}
+}
+}
 } else {
 panic(err)
 }
-acmeClient = newAcmeClient(func(challenge *resolver.SolverManager) error {
-err = challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{})
-if err != nil {
-return err
-}
-if os.Getenv("ENABLE_HTTP_SERVER") == "true" {
-return challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{})
-}
-return err
-})
+acmeClient, err = lego.NewClient(myAcmeConfig)
+if err != nil {
+log.Printf("[ERROR] Can't create ACME client, continuing with mock certs only: %s", err)
+} else {
+err = acmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{})
+if err != nil {
+log.Printf("[ERROR] Can't create TLS-ALPN-01 provider: %s", err)
+}
+if os.Getenv("ENABLE_HTTP_SERVER") == "true" {
+err = acmeClient.Challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{})
+if err != nil {
+log.Printf("[ERROR] Can't create HTTP-01 provider: %s", err)
+}
+}
+}
-mainDomainAcmeClient = newAcmeClient(func(challenge *resolver.SolverManager) error {
-if os.Getenv("DNS_PROVIDER") == "" {
-// using mock server, don't use wildcard certs
-return challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{})
-}
-provider, err := dns.NewDNSChallengeProviderByName(os.Getenv("DNS_PROVIDER"))
-if err != nil {
-return err
-}
-return challenge.SetDNS01Provider(provider)
-})
+mainDomainAcmeClient, err = lego.NewClient(myAcmeConfig)
+if err != nil {
+log.Printf("[ERROR] Can't create ACME client, continuing with mock certs only: %s", err)
+} else {
+if os.Getenv("DNS_PROVIDER") == "" {
+// using mock server, don't use wildcard certs
+err := mainDomainAcmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{})
+if err != nil {
+log.Printf("[ERROR] Can't create TLS-ALPN-01 provider: %s", err)
+}
+} else {
+provider, err := dns.NewDNSChallengeProviderByName(os.Getenv("DNS_PROVIDER"))
+if err != nil {
+log.Printf("[ERROR] Can't create DNS Challenge provider: %s", err)
+}
+err = mainDomainAcmeClient.Challenge.SetDNS01Provider(provider)
+if err != nil {
+log.Printf("[ERROR] Can't create DNS-01 provider: %s", err)
+}
+}
+}
-resBytes, err := keyDatabase.Get(MainDomainSuffix)
-if err != nil {
-// key database is not working
-panic(err)
-} else if resBytes == nil {
+if mainCertBytes == nil {
 _, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(MainDomainSuffix), string(MainDomainSuffix[1:])}, nil, "")
 if err != nil {
-log.Fatalf("Couldn't renew certificate for *%s: %s", MainDomainSuffix, err)
+log.Printf("[ERROR] Couldn't renew main domain certificate, continuing with mock certs only: %s", err)
 }
 }
@@ -433,7 +531,7 @@ func setupCertificates() {
 for {
 err := keyDatabase.Sync()
 if err != nil {
-log.Printf("Syncinc key database failed: %s", err)
+log.Printf("[ERROR] Syncinc key database failed: %s", err)
 }
 time.Sleep(5 * time.Minute)
 }
@@ -443,7 +541,8 @@ func setupCertificates() {
 // clean up expired certs
 now := time.Now()
 expiredCertCount := 0
-key, resBytes, err := keyDatabase.Items().Next()
+keyDatabaseIterator := keyDatabase.Items()
+key, resBytes, err := keyDatabaseIterator.Next()
 for err == nil {
 if !bytes.Equal(key, MainDomainSuffix) {
 resGob := bytes.NewBuffer(resBytes)
@@ -458,49 +557,40 @@ func setupCertificates() {
 if err != nil || !tlsCertificates[0].NotAfter.After(now) {
 err := keyDatabase.Delete(key)
 if err != nil {
-log.Printf("Deleting expired certificate for %s failed: %s", string(key), err)
+log.Printf("[ERROR] Deleting expired certificate for %s failed: %s", string(key), err)
 } else {
 expiredCertCount++
 }
 }
 }
-key, resBytes, err = keyDatabase.Items().Next()
+key, resBytes, err = keyDatabaseIterator.Next()
 }
-log.Printf("Removed %d expired certificates from the database", expiredCertCount)
+log.Printf("[INFO] Removed %d expired certificates from the database", expiredCertCount)
 // compact the database
 result, err := keyDatabase.Compact()
 if err != nil {
-log.Printf("Compacting key database failed: %s", err)
+log.Printf("[ERROR] Compacting key database failed: %s", err)
 } else {
-log.Printf("Compacted key database (%+v)", result)
+log.Printf("[INFO] Compacted key database (%+v)", result)
 }
 // update main cert
-resBytes, err = keyDatabase.Get(MainDomainSuffix)
-if err != nil {
-// key database is not working
-panic(err)
-}
-resGob := bytes.NewBuffer(resBytes)
-resDec := gob.NewDecoder(resGob)
 res := &certificate.Resource{}
-err = resDec.Decode(res)
-if err != nil {
-panic(err)
-}
-tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
-// renew main certificate 30 days before it expires
-if !tlsCertificates[0].NotAfter.After(time.Now().Add(-30 * 24 * time.Hour)) {
-go (func() {
-_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(MainDomainSuffix), string(MainDomainSuffix[1:])}, res, "")
-if err != nil {
-log.Printf("Couldn't renew certificate for *%s: %s", MainDomainSuffix, err)
-}
-})()
-}
+if !PogrebGet(keyDatabase, MainDomainSuffix, res) {
+log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", "expected main domain cert to exist, but it's missing - seems like the database is corrupted")
+} else {
+tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
+// renew main certificate 30 days before it expires
+if !tlsCertificates[0].NotAfter.After(time.Now().Add(-30 * 24 * time.Hour)) {
+go (func() {
+_, err = obtainCert(mainDomainAcmeClient, []string{"*" + string(MainDomainSuffix), string(MainDomainSuffix[1:])}, res, "")
+if err != nil {
+log.Printf("[ERROR] Couldn't renew certificate for main domain: %s", err)
+}
+})()
+}
+}
 time.Sleep(12 * time.Hour)
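The renewal logic above reuses the otherwise unused CSR field of certificate.Resource as a "do not retry before" marker: obtainCert stores a decimal Unix timestamp six hours in the future when a renewal fails, and retrieveCertFromDB keeps serving the stored certificate until that time has passed. A self-contained sketch of that round-trip, with illustrative helper names that do not appear in the diff:

package main

import (
	"strconv"
	"time"
)

// retryMarker encodes the next allowed renewal attempt as a decimal Unix
// timestamp, mirroring what obtainCert writes into res.CSR above.
func retryMarker(d time.Duration) []byte {
	return []byte(strconv.FormatInt(time.Now().Add(d).Unix(), 10))
}

// renewalAllowed parses such a marker the way retrieveCertFromDB does: while
// the stored time lies in the future, the cached certificate is served as-is
// and no renewal is attempted.
func renewalAllowed(csr []byte) bool {
	if len(csr) == 0 {
		return true
	}
	nextTryUnix, err := strconv.ParseInt(string(csr), 10, 64)
	if err != nil {
		return true // not a timestamp, so treat it as a real CSR and don't block renewal
	}
	return !time.Now().Before(time.Unix(nextTryUnix, 0))
}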

Stepper timing helper (file name not shown):

@@ -14,9 +14,9 @@ var Logger = func(s string, i ...interface{}) {
 }
 type Stepper struct {
 Name string
 Start time.Time
 LastStep time.Time
 Completion time.Time
 }
@@ -27,8 +27,8 @@ func Start(name string) *Stepper {
 t := time.Now()
 Logger("%s: started at %s\n", name, t.Format(time.RFC3339))
 return &Stepper{
 Name: name,
 Start: t,
 LastStep: t,
 }
 }

DNS lookup & canonical domain code (file name not shown):

@@ -9,7 +9,8 @@ import (
 )
 // DnsLookupCacheTimeout specifies the timeout for the DNS lookup cache.
-var DnsLookupCacheTimeout = 15*time.Minute
+var DnsLookupCacheTimeout = 15 * time.Minute
 // dnsLookupCache stores DNS lookups for custom domains
 var dnsLookupCache = mcache.New()
@@ -61,9 +62,9 @@ func getTargetFromDNS(domain string) (targetOwner, targetRepo, targetBranch stri
 return
 }
 // CanonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
-var CanonicalDomainCacheTimeout = 15*time.Minute
+var CanonicalDomainCacheTimeout = 15 * time.Minute
 // canonicalDomainCache stores canonical domains
 var canonicalDomainCache = mcache.New()
@@ -80,7 +81,7 @@ func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain st
 }
 } else {
 req := fasthttp.AcquireRequest()
-req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/.domains")
+req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + targetOwner + "/" + targetRepo + "/raw/" + targetBranch + "/.domains" + "?access_token=" + GiteaApiToken)
 res := fasthttp.AcquireResponse()
 err := upstreamClient.Do(req, res)
@@ -98,14 +99,14 @@ func checkCanonicalDomain(targetOwner, targetRepo, targetBranch, actualDomain st
 }
 }
 }
-domains = append(domains, targetOwner + string(MainDomainSuffix))
+domains = append(domains, targetOwner+string(MainDomainSuffix))
-if domains[len(domains) - 1] == actualDomain {
+if domains[len(domains)-1] == actualDomain {
 valid = true
 }
 if targetRepo != "" && targetRepo != "pages" {
-domains[len(domains) - 1] += "/" + targetRepo
+domains[len(domains)-1] += "/" + targetRepo
 }
-_ = canonicalDomainCache.Set(targetOwner + "/" + targetRepo + "/" + targetBranch, domains, CanonicalDomainCacheTimeout)
+_ = canonicalDomainCache.Set(targetOwner+"/"+targetRepo+"/"+targetBranch, domains, CanonicalDomainCacheTimeout)
 }
 canonicalDomain = domains[0]
 return
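This file and the handler code below now append the configured API token to every Gitea API request as an access_token query parameter, so that repositories with limited visibility can still be served. A minimal sketch of the pattern in isolation; the helper name is illustrative and does not exist in the repository:

package main

// withGiteaToken appends the token the same way the hunks above do, by plain
// string concatenation. This assumes the URL carries no query string yet; with
// an empty token the parameter is still appended and the request is presumably
// treated as anonymous by Gitea.
func withGiteaToken(apiURL, token string) string {
	return apiURL + "?access_token=" + token
}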

Page handler & upstream code (file name not shown):

@@ -25,8 +25,8 @@ func handler(ctx *fasthttp.RequestCtx) {
 // Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
 ctx.Response.Header.Set("Referrer-Policy", "strict-origin-when-cross-origin")
-// Enable caching, but require revalidation to reduce confusion
-ctx.Response.Header.Set("Cache-Control", "must-revalidate")
+// Enable browser caching for up to 10 minutes
+ctx.Response.Header.Set("Cache-Control", "public, max-age=600")
 trimmedHost := TrimHostPort(ctx.Request.Host())
@@ -118,7 +118,7 @@ func handler(ctx *fasthttp.RequestCtx) {
 if targetRepo != "pages" {
 canonicalPath = "/" + strings.SplitN(canonicalPath, "/", 3)[2]
 }
-ctx.Redirect("https://" + canonicalDomain + canonicalPath, fasthttp.StatusTemporaryRedirect)
+ctx.Redirect("https://"+canonicalDomain+canonicalPath, fasthttp.StatusTemporaryRedirect)
 return
 }
 }
@@ -180,12 +180,18 @@ func handler(ctx *fasthttp.RequestCtx) {
 targetRepo = pathElements[0]
 targetPath = strings.Trim(strings.Join(pathElements[1:], "/"), "/")
+if targetOwner == "www" {
+// www.codeberg.page redirects to codeberg.page
+ctx.Redirect("https://" + string(MainDomainSuffix[1:]) + string(ctx.Path()), fasthttp.StatusPermanentRedirect)
+return
+}
 // Check if the first directory is a repo with the second directory as a branch
 // example.codeberg.page/myrepo/@main/index.html
 if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
 if targetRepo == "pages" {
 // example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
-ctx.Redirect("/" + strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
+ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), fasthttp.StatusTemporaryRedirect)
 return
 }
@@ -242,7 +248,7 @@ func handler(ctx *fasthttp.RequestCtx) {
 // Serve pages from external domains
 targetOwner, targetRepo, targetBranch = getTargetFromDNS(trimmedHostStr)
 if targetOwner == "" {
-ctx.Redirect(BrokenDNSPage, fasthttp.StatusTemporaryRedirect)
+returnErrorPage(ctx, fasthttp.StatusFailedDependency)
 return
 }
@@ -268,7 +274,7 @@ func handler(ctx *fasthttp.RequestCtx) {
 ctx.Redirect("https://"+canonicalDomain+string(ctx.RequestURI()), fasthttp.StatusTemporaryRedirect)
 return
 } else {
-ctx.Redirect(BrokenDNSPage, fasthttp.StatusTemporaryRedirect)
+returnErrorPage(ctx, fasthttp.StatusFailedDependency)
 return
 }
 }
@@ -293,35 +299,42 @@ func returnErrorPage(ctx *fasthttp.RequestCtx, code int) {
 message += " - domain not specified in <code>.domains</code> file"
 }
 if code == fasthttp.StatusFailedDependency {
-message += " - owner, repo or branch doesn't exist"
+message += " - target repo/branch doesn't exist or is private"
 }
 ctx.Response.SetBody(bytes.ReplaceAll(NotFoundPage, []byte("%status"), []byte(strconv.Itoa(code)+" "+message)))
 }
-// BranchExistanceCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
-var DefaultBranchCacheTimeout = 1*time.Hour
+// DefaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
+var DefaultBranchCacheTimeout = 15 * time.Minute
 // BranchExistanceCacheTimeout specifies the timeout for the branch timestamp & existance cache. It should be shorter
 // than FileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
 // picked up faster, while still allowing the content to be cached longer if nothing changes.
-var BranchExistanceCacheTimeout = 5*time.Minute
+var BranchExistanceCacheTimeout = 5 * time.Minute
 // branchTimestampCache stores branch timestamps for faster cache checking
 var branchTimestampCache = mcache.New()
 type branchTimestamp struct {
 branch string
 timestamp time.Time
 }
 // FileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
 // on your available memory.
-var FileCacheTimeout = 5*time.Minute
+var FileCacheTimeout = 5 * time.Minute
 // FileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
 var FileCacheSizeLimit = 1024 * 1024
 // fileResponseCache stores responses from the Gitea server
+// TODO: make this an MRU cache with a size limit
 var fileResponseCache = mcache.New()
 type fileResponse struct {
 exists bool
 mimeType string
 body []byte
 }
 // getBranchTimestamp finds the default branch (if branch is "") and returns the last modification time of the branch
@@ -338,30 +351,30 @@ func getBranchTimestamp(owner, repo, branch string) *branchTimestamp {
 if branch == "" {
 // Get default branch
 var body = make([]byte, 0)
-status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo, 5 * time.Second)
+status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"?access_token="+GiteaApiToken, 5*time.Second)
 if err != nil || status != 200 {
-_ = branchTimestampCache.Set(owner + "/" + repo + "/" + branch, nil, DefaultBranchCacheTimeout)
+_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, nil, DefaultBranchCacheTimeout)
 return nil
 }
 result.branch = fastjson.GetString(body, "default_branch")
 }
 var body = make([]byte, 0)
-status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch, 5 * time.Second)
+status, body, err := fasthttp.GetTimeout(body, string(GiteaRoot)+"/api/v1/repos/"+owner+"/"+repo+"/branches/"+branch+"?access_token="+GiteaApiToken, 5*time.Second)
 if err != nil || status != 200 {
 return nil
 }
 result.timestamp, _ = time.Parse(time.RFC3339, fastjson.GetString(body, "commit", "timestamp"))
-_ = branchTimestampCache.Set(owner + "/" + repo + "/" + branch, result, BranchExistanceCacheTimeout)
+_ = branchTimestampCache.Set(owner+"/"+repo+"/"+branch, result, BranchExistanceCacheTimeout)
 return result
 }
 var upstreamClient = fasthttp.Client{
 ReadTimeout: 10 * time.Second,
 MaxConnDuration: 60 * time.Second,
 MaxConnWaitTimeout: 1000 * time.Millisecond,
 MaxConnsPerHost: 128 * 16, // TODO: adjust bottlenecks for best performance with Gitea!
 }
 // upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
@@ -405,11 +418,11 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 var res *fasthttp.Response
 var cachedResponse fileResponse
 var err error
-if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10)); ok {
+if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10)); ok && len(cachedValue.(fileResponse).body) > 0 {
 cachedResponse = cachedValue.(fileResponse)
 } else {
 req = fasthttp.AcquireRequest()
-req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + uri)
+req.SetRequestURI(string(GiteaRoot) + "/api/v1/repos/" + uri + "?access_token=" + GiteaApiToken)
 res = fasthttp.AcquireResponse()
 res.SetBodyStream(&strings.Reader{}, -1)
 err = upstreamClient.Do(req, res)
@@ -425,17 +438,26 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 optionsForIndexPages.AppendTrailingSlash = true
 for _, indexPage := range IndexPages {
 if upstream(ctx, targetOwner, targetRepo, targetBranch, strings.TrimSuffix(targetPath, "/")+"/"+indexPage, &optionsForIndexPages) {
-_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
+_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
 exists: false,
 }, FileCacheTimeout)
 return true
 }
 }
+// compatibility fix for GitHub Pages (/example → /example.html)
+optionsForIndexPages.AppendTrailingSlash = false
+optionsForIndexPages.RedirectIfExists = targetPath + ".html"
+if upstream(ctx, targetOwner, targetRepo, targetBranch, targetPath + ".html", &optionsForIndexPages) {
+_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
+exists: false,
+}, FileCacheTimeout)
+return true
+}
 }
 ctx.Response.SetStatusCode(fasthttp.StatusNotFound)
 if res != nil {
 // Update cache if the request is fresh
-_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
+_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), fileResponse{
 exists: false,
 }, FileCacheTimeout)
 }
@@ -447,12 +469,20 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 return true
 }
-// Append trailing slash if missing (for index files)
+// Append trailing slash if missing (for index files), and redirect to fix filenames in general
 // options.AppendTrailingSlash is only true when looking for index pages
 if options.AppendTrailingSlash && !bytes.HasSuffix(ctx.Request.URI().Path(), []byte{'/'}) {
 ctx.Redirect(string(ctx.Request.URI().Path())+"/", fasthttp.StatusTemporaryRedirect)
 return true
 }
+if bytes.HasSuffix(ctx.Request.URI().Path(), []byte("/index.html")) {
+ctx.Redirect(strings.TrimSuffix(string(ctx.Request.URI().Path()), "index.html"), fasthttp.StatusTemporaryRedirect)
+return true
+}
+if options.RedirectIfExists != "" {
+ctx.Redirect(options.RedirectIfExists, fasthttp.StatusTemporaryRedirect)
+return true
+}
 s.Step("error handling")
 // Set the MIME type
@@ -491,11 +521,11 @@ func upstream(ctx *fasthttp.RequestCtx, targetOwner string, targetRepo string, t
 }
 s.Step("response")
-if res != nil {
+if res != nil && ctx.Err() == nil {
 cachedResponse.exists = true
 cachedResponse.mimeType = mimeType
 cachedResponse.body = cacheBodyWriter.Bytes()
-_ = fileResponseCache.Set(uri + "?timestamp=" + strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
+_ = fileResponseCache.Set(uri+"?timestamp="+strconv.FormatInt(options.BranchTimestamp.Unix(), 10), cachedResponse, FileCacheTimeout)
 }
 return true
@@ -507,5 +537,6 @@ type upstreamOptions struct {
 ForbiddenMimeTypes map[string]struct{}
 TryIndexPages bool
 AppendTrailingSlash bool
+RedirectIfExists string
 BranchTimestamp time.Time
 }
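Taken together, the upstream changes above give a request that does not match a file directly two fallbacks: the configured index pages inside the directory, and then a GitHub-Pages-style "<path>.html" lookup that redirects via the new RedirectIfExists option when it exists. A small illustrative sketch of the resulting candidate order (not code from the repository; it assumes IndexPages contains entries such as "index.html"):

package main

import "strings"

// candidatePaths lists, in order, the repository paths the handler effectively
// tries for a request path that did not resolve directly: first the index
// pages inside the directory, then the "<path>.html" compatibility fallback.
func candidatePaths(requestPath string, indexPages []string) []string {
	trimmed := strings.TrimSuffix(requestPath, "/")
	candidates := make([]string, 0, len(indexPages)+1)
	for _, index := range indexPages {
		candidates = append(candidates, trimmed+"/"+index)
	}
	return append(candidates, trimmed+".html")
}

For example, candidatePaths("/docs/about", []string{"index.html"}) yields "/docs/about/index.html" followed by "/docs/about.html"; when only the latter exists, the handler answers with a temporary redirect to it.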

Handler performance test (file name not shown):

@@ -37,7 +37,6 @@ func TestHandlerPerformance(t *testing.T) {
 t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
 }
 ctx.Response.Reset()
 ctx.Response.ResetBody()
 ctx.Request.SetRequestURI("http://example.momar.xyz/")

Utility helpers (file name not shown):

@@ -1,6 +1,10 @@
 package main
-import "bytes"
+import (
+"bytes"
+"encoding/gob"
+"github.com/akrylysov/pogreb"
+)
 // GetHSTSHeader returns a HSTS header with includeSubdomains & preload for MainDomainSuffix and RawDomain, or an empty
 // string for custom domains.
@@ -19,3 +23,34 @@
 }
 return host
 }
+func PogrebPut(db *pogreb.DB, name []byte, obj interface{}) {
+var resGob bytes.Buffer
+resEnc := gob.NewEncoder(&resGob)
+err := resEnc.Encode(obj)
+if err != nil {
+panic(err)
+}
+err = db.Put(name, resGob.Bytes())
+if err != nil {
+panic(err)
+}
+}
+func PogrebGet(db *pogreb.DB, name []byte, obj interface{}) bool {
+resBytes, err := db.Get(name)
+if err != nil {
+panic(err)
+}
+if resBytes == nil {
+return false
+}
+resGob := bytes.NewBuffer(resBytes)
+resDec := gob.NewDecoder(resGob)
+err = resDec.Decode(obj)
+if err != nil {
+panic(err)
+}
+return true
+}
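The new PogrebPut and PogrebGet helpers replace the hand-rolled gob encode/decode blocks that the certificate code used before this changeset. A short usage sketch in the same package, storing a lego certificate.Resource the way the certificate code does; the function name is illustrative only:

package main

import (
	"log"

	"github.com/akrylysov/pogreb"
	"github.com/go-acme/lego/v4/certificate"
)

// storeAndLoad demonstrates the round-trip: PogrebPut gob-encodes any value
// and writes it under the given key, PogrebGet reports false when the key is
// absent and decodes into the provided pointer otherwise.
func storeAndLoad(db *pogreb.DB, domain string, res *certificate.Resource) {
	PogrebPut(db, []byte(domain), res)

	loaded := &certificate.Resource{}
	if !PogrebGet(db, []byte(domain), loaded) {
		log.Printf("no certificate resource stored for %s", domain)
		return
	}
	log.Printf("loaded certificate resource for %s (CertURL: %s)", domain, loaded.CertURL)
}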

main.go (28 lines changed)

@@ -39,12 +39,11 @@ var MainDomainSuffix = []byte("." + envOr("PAGES_DOMAIN", "codeberg.page"))
 // GiteaRoot specifies the root URL of the Gitea instance, without a trailing slash.
 var GiteaRoot = []byte(envOr("GITEA_ROOT", "https://codeberg.org"))
+var GiteaApiToken = envOr("GITEA_API_TOKEN", "")
 //go:embed 404.html
 var NotFoundPage []byte
-// BrokenDNSPage will be shown (with a redirect) when trying to access a domain for which no DNS CNAME record exists.
-var BrokenDNSPage = envOr("REDIRECT_BROKEN_DNS", "https://docs.codeberg.org/pages/custom-domains/")
 // RawDomain specifies the domain from which raw repository content shall be served in the following format:
 // https://{RawDomain}/{owner}/{repo}[/{branch|tag|commit}/{version}]/{filepath...}
 // (set to []byte(nil) to disable raw content hosting)
@@ -72,6 +71,25 @@ var IndexPages = []string{
 // main sets up and starts the web server.
 func main() {
+if len(os.Args) > 1 && os.Args[1] == "--remove-certificate" {
+if len(os.Args) < 2 {
+println("--remove-certificate requires at least one domain as an argument")
+os.Exit(1)
+}
+if keyDatabaseErr != nil {
+panic(keyDatabaseErr)
+}
+for _, domain := range os.Args[2:] {
+if err := keyDatabase.Delete([]byte(domain)); err != nil {
+panic(err)
+}
+}
+if err := keyDatabase.Sync(); err != nil {
+panic(err)
+}
+os.Exit(0)
+}
 // Make sure MainDomain has a trailing dot, and GiteaRoot has no trailing slash
 if !bytes.HasPrefix(MainDomainSuffix, []byte{'.'}) {
 MainDomainSuffix = append([]byte{'.'}, MainDomainSuffix...)
@@ -92,7 +110,7 @@ func main() {
 NoDefaultServerHeader: true,
 NoDefaultDate: true,
 ReadTimeout: 30 * time.Second, // needs to be this high for ACME certificates with ZeroSSL & HTTP-01 challenge
 Concurrency: 1024 * 32, // TODO: adjust bottlenecks for best performance with Gitea!
 MaxConnsPerIP: 100,
 }
@@ -116,7 +134,7 @@ func main() {
 }
 ctx.SetBodyString(challenge.(string))
 } else {
-ctx.Redirect("https://" + string(ctx.Host()) + string(ctx.RequestURI()), http.StatusMovedPermanently)
+ctx.Redirect("https://"+string(ctx.Host())+string(ctx.RequestURI()), http.StatusMovedPermanently)
 }
 })
 if err != nil {