Commit 45087dd2 authored by pfandzelter's avatar pfandzelter
Browse files

wip

parent 3044e32b
Pipeline #40623 failed with stages
in 24 minutes and 31 seconds
......@@ -103,12 +103,15 @@ In this example case, the following commands are required:
./gen-cert.sh fredclient 172.26.1.3
```
If you want to add additional IP addresses (e.g., a server listens on several interfaces), modify the script for additional `IP.X` entries in the `[alt_names]` section of the CSR.
If you want the certificate to be valid for a hostname, add `DNS.1`, etc.
#### Network
If you run this example in Docker, you must first create a simple network for the individual services to talk to each other:
```bash
docker network create fredwork --gateway 172.26.0.1 --subnet 172.26.0.0/16
docker network create examplenetwork --gateway 172.26.0.1 --subnet 172.26.0.0/16
```
#### NaSe
......@@ -121,7 +124,7 @@ docker run -d \
-v $(pwd)/etcdnase.crt:/cert/etcdnase.crt \
-v $(pwd)/etcdnase.key:/cert/etcdnase.key \
-v $(pwd)/ca.crt:/cert/ca.crt \
--network=fredwork \
--network=examplenetwork \
--ip=172.26.1.1 \
gcr.io/etcd-development/etcd:v3.5.0 \
etcd --name s-1 \
......@@ -151,7 +154,7 @@ docker run -d \
-v $(pwd)/frednode.crt:/cert/frednode.crt \
-v $(pwd)/frednode.key:/cert/frednode.key \
-v $(pwd)/ca.crt:/cert/ca.crt \
--network=fredwork \
--network=examplenetwork \
--ip=172.26.1.2 \
fred \
fred --log-level info \
......@@ -191,7 +194,7 @@ docker run \
-v $(pwd)/fredclient.key:/cert/fredclient.key \
-v $(pwd)/ca.crt:/cert/ca.crt \
-v $(pwd)/proto/client/client.proto:/client.proto \
--network=fredwork \
--network=examplenetwork \
--ip=172.26.1.3 \
-it \
grpcc \
......@@ -502,6 +505,7 @@ Some last words, keep pull requests small (not 100 files changed etc :D), so the
### Code Quality and Testing
In order to keep our code clean and working, we provide a number of test suites and support a number of code quality tools.
Please keep in mind that most tests require a working Docker installation.
#### Static Analysis
......@@ -545,13 +549,13 @@ This is part of a TDD approach where tests can be defined first and the software
The "3 node test" starts a FReD deployment of three FReD nodes and runs a client against the FReD cluster that validates different functionalities.
It can be found in `./tests/3NodeTest`.
It uses Docker compose and can thus easily be started with `make 3n-all`.
It uses Docker compose and can thus easily be started with `go test .`.
The deployment comprises a single `etcd` Docker container as a NaSe, a simple trigger node, two FReD nodes that each comprise only a single machine (node _B_ and _C_) with a storage server, and a distributed FReD node _A_ that comprises three individual FReD machines behind a `fredproxy` sharing a single storage server.
All machines are connected over a Docker network.
The test client runs a number of operations against the FReD deployment and outputs a list of errors.
The complete code for the test client can be found in `./tests/3NodeTest/cmd/main/main.go`.
The complete code for the test client can be found in `./tests/3NodeTest/3node_test.go`.
When the debug log output of the individual nodes is not enough to debug an issue, it is also possible to connect a `dlv` debugger directly to FReD node _B_ to set breakpoints or step through code.
This is currently configured to use the included debugger in the GoLang IDE.
......@@ -582,5 +586,4 @@ $ ./frednode --cpuprofile fredcpu.pprof --memprof fredmem.pprof [ALL_YOUR_OTHER_
# you also need to provide the path to your frednode binary
$ go tool pprof --pdf ./frednode fredcpu.pprof > cpu.pdf
$ go tool pprof --pdf ./frednode fredmem.pprof > mem.pdf
```
......@@ -20,16 +20,7 @@ COPY proto proto
RUN CGO_ENABLED=0 go install ./cmd/alexandra/
# actual Docker image
FROM scratch
WORKDIR /
COPY --from=golang /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=golang /go/bin/alexandra alexandra
EXPOSE 443
EXPOSE 10000
ENV PATH=.
ENTRYPOINT ["alexandra"]
ENTRYPOINT ["/go/bin/alexandra"]
\ No newline at end of file
......@@ -313,6 +313,8 @@ func main() {
isProxied := fc.Server.Proxy != "" && fc.Server.Host != fc.Server.Proxy
es := api.NewServer(fc.Server.Host, f.E, fc.Server.Cert, fc.Server.Key, fc.Server.CA, isProxied, fc.Server.Proxy)
log.Info().Msg("FReD Node is operational!")
quit := make(chan os.Signal, 1)
signal.Notify(quit,
os.Interrupt,
......
......@@ -33,6 +33,11 @@ EOF
# write the CN into the config file
echo "CN = ${NAME}" >> "${NAME}".conf
# this is the CSR config
# the IP.1 and DNS.1 entries in [alt_names] are required
# the given IP will be added as IP.2
# if you need more IP addresses, stop at this point and add IP.3, etc
# if you want a hostname (e.g., localhost), add DNS.2, etc
cat >> ${NAME}.conf <<EOF
[v3_req]
keyUsage = keyEncipherment, dataEncipherment, digitalSignature
......@@ -40,6 +45,7 @@ extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = localhost
IP.1 = 127.0.0.1
EOF
......
......@@ -30,7 +30,7 @@ func NewServer(host string, caCert string, serverCert string, serverKey string,
log.Fatal().Msg("alexandra server: no certificate file given")
}
if serverCert == "" {
if serverKey == "" {
log.Fatal().Msg("alexandra server: no key file given")
}
......
**/*
!/pkg/*
!/cmd/*
!nodeA-haproxy.cfg
This diff is collapsed.
# Dockerfile for the 3NodeTest client (adapted from the original FReD Dockerfile).
# Stage 1: build the test binary with the Go toolchain.
FROM golang:1.16-alpine
LABEL maintainer="tp@mcc.tu-berlin.de"
WORKDIR /go/src/git.tu-berlin.de/mcc-fred/fred/
# Install CA certificates so TLS connections can be verified; clear the apk
# cache afterwards to keep the layer small.
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
# NOTE(review): installing the custom NaSe CA is currently disabled — confirm
# whether it is still needed before removing the line entirely.
#COPY nase/tls/ca.crt /usr/local/share/ca-certificates/ca.crt
RUN update-ca-certificates
# Copy only the module files first so the downloaded dependencies form their
# own cached layer and don't have to be re-downloaded on every source change.
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY tests tests
COPY pkg pkg
COPY proto proto
RUN go install ./tests/3NodeTest/cmd/main/
ENTRYPOINT ["/go/bin/main"]
package main
import (
"strconv"
)
// AuthenticationSuite tests role-based access control (RBAC) and
// authentication against the FReD deployment.
type AuthenticationSuite struct {
	// c holds the shared test configuration (node and client handles).
	c *Config
}
// Name returns the display name of this test suite.
func (t *AuthenticationSuite) Name() string {
	const suiteName = "Authentication"
	return suiteName
}
// RunTests exercises RBAC and authentication: it creates a keygroup, grants
// and revokes permissions for the restricted "littleclient" user, and checks
// that reads and writes succeed or fail accordingly.
func (t *AuthenticationSuite) RunTests() {
	nodeA := t.c.nodeA
	nodeB := t.c.nodeB
	little := t.c.littleClient

	// test RBAC and authentication
	logNodeAction(nodeA, "create keygroup \"rbactest\"")
	nodeA.CreateKeygroup("rbactest", true, 0, false)

	logNodeAction(nodeA, "put item into keygroup \"rbactest\"")
	nodeA.PutItem("rbactest", "item1", "value1", false)

	logNodeAction(nodeA, "add little client as read only to rbac test")
	nodeA.AddUser("littleclient", "rbactest", "ReadKeygroup", false)

	logNodeAction(nodeA, "try to read with little client -> should work")
	val, _ := little.GetItem("rbactest", "item1", false)
	switch {
	case len(val) == 0:
		logNodeFailure(nodeA, "1 return value", "0 return values")
	case val[0] != "value1":
		logNodeFailure(nodeA, "value1", val[0])
	}

	logNodeAction(nodeA, "try to write with little client -> should not work")
	little.PutItem("rbactest", "item1", "value2", true)

	logNodeAction(nodeA, "add role configure replica to little client -> should work")
	nodeA.AddUser("littleclient", "rbactest", "ConfigureReplica", false)

	logNodeAction(nodeA, "add replica nodeB to keygroup with little client -> should work")
	little.AddKeygroupReplica("rbactest", nodeB.ID, 0, false)

	logNodeAction(nodeB, "remove permission to read from keygroup -> should work")
	nodeB.RemoveUser("littleclient", "rbactest", "ReadKeygroup", false)

	// TODO: delay is caused by etcd taking some time to inform watcher to invalidate cache
	// time.Sleep(100 * time.Second)

	logNodeAction(nodeA, "try to read from keygroup with little client -> should not work")
	if res, _ := little.GetItem("rbactest", "item1", true); len(res) != 0 {
		logNodeFailure(nodeA, "0 return values", strconv.Itoa(len(res))+" return values")
	}
}
// NewAuthenticationSuite creates the authentication test suite using the
// given shared test configuration.
func NewAuthenticationSuite(c *Config) *AuthenticationSuite {
	suite := &AuthenticationSuite{c: c}
	return suite
}
package main
import (
"fmt"
"git.tu-berlin.de/mcc-fred/fred/tests/3NodeTest/pkg/grpcclient"
)
// ConcurrencySuite tests concurrent writes to mutable keygroups from several
// goroutines and nodes.
type ConcurrencySuite struct {
	// c holds the shared test configuration (node and client handles).
	c *Config
}
// concurrentUpdates creates a fresh keygroup (named after run), replicates it
// to every node in nodes, and lets `concurrent` goroutines each perform
// `updates` random writes through the nodes (assigned round-robin). It then
// reads every written key back from nodes[0] and logs a failure for each value
// that cannot be explained by one of the concurrent writers. Does nothing when
// nodes is empty.
func concurrentUpdates(nodes []*grpcclient.Node, concurrent int, updates int, run int) {
	if len(nodes) < 1 {
		return
	}

	// each run gets its own keygroup so repeated runs cannot interfere
	keygroup := fmt.Sprintf("concurrencyTest%d", run)

	logNodeAction(nodes[0], "Create keygroup %s", keygroup)
	nodes[0].CreateKeygroup(keygroup, true, 0, false)

	// replicate the keygroup to all remaining nodes (nodes[0] is the creator)
	for i, n := range nodes {
		if i == 0 {
			continue
		}
		logNodeAction(n, "adding node as replica for %s", keygroup)
		nodes[0].AddKeygroupReplica(keygroup, n.ID, 0, false)
	}

	// expected[i] records the last value goroutine i wrote for each key
	expected := make([]map[string]string, concurrent)
	done := make(chan struct{})

	for i := 0; i < concurrent; i++ {
		expected[i] = make(map[string]string)
		go func(node *grpcclient.Node, keygroup string, expected *map[string]string) {
			for j := 0; j < updates; j++ {
				// single-character keys deliberately provoke write conflicts
				key := randStringBytes(1)
				val := randStringBytes(10)
				node.PutItem(keygroup, key, val, false)
				(*expected)[key] = val
			}
			done <- struct{}{}
		}(nodes[i%len(nodes)], keygroup, &expected[i])
	}

	// block until all goroutines have finished
	for i := 0; i < concurrent; i++ {
		<-done
	}

	// let's check if everything worked
	for i := 0; i < concurrent; i++ {
		for key, val := range expected[i] {
			v, versions := nodes[0].GetItem(keygroup, key, false)

			// the key was written by at least one goroutine, so it must exist
			if len(v) == 0 {
				logNodeFailure(nodes[0], fmt.Sprintf("expected value %s for %s", val, key), "got no return value")
				continue
			}

			// more concurrent versions than writers cannot be explained
			if len(v) > concurrent {
				logNodeFailure(nodes[0], fmt.Sprintf("expected value %s for %s", val, key), fmt.Sprintf("got %d return values: %#v %#v", len(v), v, versions))
				continue
			}

			for j := range v {
				if v[j] == val {
					// ok!
					continue
				}
				// hm, our returned value isn't the same as it should be - let's check the other maps
				found := false
				var possibleVals []string
				for l := 0; l < concurrent; l++ {
					jVal, ok := expected[l][key]
					if !ok {
						continue
					}
					possibleVals = append(possibleVals, jVal)
					if jVal == v[j] {
						found = true
						break
					}
				}
				// the returned value matches no writer's last update — report it
				if !found {
					logNodeFailure(nodes[0], fmt.Sprintf("one of values %#v for %s", possibleVals, key), fmt.Sprintf("got wrong value %#v", v))
				}
			}
		}
	}
}
// Name returns the display name of this test suite.
func (t *ConcurrencySuite) Name() string {
	const suiteName = "Concurrency"
	return suiteName
}
// RunTests runs the mutable-keygroup concurrency scenarios; each scenario
// uses a unique run number so the keygroup names do not clash. Tests 1 and 2
// are currently disabled.
func (t *ConcurrencySuite) RunTests() {
	run := 0
	// Test 1: create a keygroup on a node, have it updated by two concurrent goroutines
	// expected behavior: all updates arrive
	//concurrentUpdates([]*grpcclient.Node{t.c.nodeA}, 2, 1000, run)

	// Test 2: create a keygroup on a node, have it updated by 100 concurrent goroutines
	// expected behavior: all updates arrive
	run++
	//concurrentUpdates([]*grpcclient.Node{t.c.nodeA}, 100, 100, run)

	// Test 3: create a keygroup on two nodes, have one goroutine update data at each node
	// expected behavior: all updates arrive, both nodes have the same data
	run++
	concurrentUpdates([]*grpcclient.Node{t.c.nodeA, t.c.nodeB}, 2, 10, run)
}
// NewConcurrencySuite creates the concurrency test suite using the given
// shared test configuration.
func NewConcurrencySuite(c *Config) *ConcurrencySuite {
	suite := &ConcurrencySuite{c: c}
	return suite
}
package main
import (
"fmt"
"strconv"
"time"
"git.tu-berlin.de/mcc-fred/fred/tests/3NodeTest/pkg/grpcclient"
)
// ConcurrencyImmutableSuite tests concurrent appends to immutable
// (append-only) keygroups from several goroutines and nodes.
type ConcurrencyImmutableSuite struct {
	// c holds the shared test configuration (node and client handles).
	c *Config
}
// concurrentUpdatesImmutable creates a fresh immutable (append-only) keygroup
// (named after run), replicates it to every node in nodes, and lets
// `concurrent` goroutines each append `updates` random values through the
// nodes (assigned round-robin). It then verifies that every id was written by
// exactly one client and that reading each id back from nodes[0] yields
// exactly the appended value. Does nothing when nodes is empty.
func concurrentUpdatesImmutable(nodes []*grpcclient.Node, concurrent int, updates int, run int) {
	if len(nodes) < 1 {
		return
	}

	// each run gets its own keygroup so repeated runs cannot interfere
	keygroup := fmt.Sprintf("concurrencyTestImmutable%d", run)

	logNodeAction(nodes[0], "Create keygroup %s", keygroup)
	nodes[0].CreateKeygroup(keygroup, false, 0, false)

	// replicate the keygroup to all remaining nodes (nodes[0] is the creator)
	for i, n := range nodes {
		if i == 0 {
			continue
		}
		logNodeAction(n, "adding node as replica for %s", keygroup)
		nodes[0].AddKeygroupReplica(keygroup, n.ID, 0, false)
	}

	// expected[i] records the value goroutine i appended for each id
	expected := make([]map[uint64]string, concurrent)
	done := make(chan struct{})

	for i := 0; i < concurrent; i++ {
		expected[i] = make(map[uint64]string)
		go func(i int, node *grpcclient.Node, keygroup string, expected *map[uint64]string) {
			for j := 0; j < updates; j++ {
				val := randStringBytes(10)
				// ids are derived from the current time plus the goroutine
				// index to keep concurrent appends mostly disjoint
				id := uint64(time.Now().UnixNano()) + uint64(i)
				_ = node.AppendItem(keygroup, id, val, false)
				(*expected)[id] = val
			}
			done <- struct{}{}
		}(i, nodes[i%len(nodes)], keygroup, &expected[i])
	}

	// block until all goroutines have finished
	for i := 0; i < concurrent; i++ {
		<-done
	}

	// let's check if everything worked
	// in this case no same key should appear in different maps: an id in an
	// immutable keygroup can only ever be written by one client
	keys := make(map[uint64]string)
	for i := 0; i < concurrent; i++ {
		for key, val := range expected[i] {
			if _, ok := keys[key]; ok {
				logNodeFailure(nodes[0], fmt.Sprintf("only one client can write to key %d", key), fmt.Sprintf("several clients were able to write to same id for %d", key))
				continue
			}
			keys[key] = val
		}
	}

	// every appended id must be readable and hold exactly the value written
	for key, val := range keys {
		k := strconv.FormatUint(key, 10)
		v, _ := nodes[0].GetItem(keygroup, k, false)

		if len(v) != 1 {
			// fixed failure message: previously read "got no %d return
			// values", which garbled the actual count
			logNodeFailure(nodes[0], fmt.Sprintf("expected one value for %d", key), fmt.Sprintf("got %d return values", len(v)))
			continue
		}

		if v[0] != val {
			logNodeFailure(nodes[0], fmt.Sprintf("value %s for %d", val, key), fmt.Sprintf("got wrong value %s", v[0]))
		}
	}
}
// Name returns the display name of this test suite.
func (t *ConcurrencyImmutableSuite) Name() string {
	const suiteName = "ConcurrencyImmutable"
	return suiteName
}
// RunTests runs the append-only concurrency scenarios with increasing
// contention; each scenario uses a unique run number so the keygroup names of
// the individual tests do not clash.
func (t *ConcurrencyImmutableSuite) RunTests() {
	run := 0
	// Test 1: create immutable keygroup, have two goroutines append data
	// expected behavior: all updates arrive
	run++
	concurrentUpdatesImmutable([]*grpcclient.Node{t.c.nodeA}, 2, 500, run)

	// Test 2: create immutable keygroup, have 100 goroutines append data
	// expected behavior: all updates arrive
	run++
	concurrentUpdatesImmutable([]*grpcclient.Node{t.c.nodeA}, 100, 50, run)

	// Test 3: create immutable keygroup on two nodes, have one goroutine each append data
	// expected behavior: all updates arrive
	run++
	concurrentUpdatesImmutable([]*grpcclient.Node{t.c.nodeA, t.c.nodeB}, 2, 100, run)
}
// NewConcurrencyImmutableSuite creates the immutable-keygroup concurrency
// test suite using the given shared test configuration.
func NewConcurrencyImmutableSuite(c *Config) *ConcurrencyImmutableSuite {
	suite := &ConcurrencyImmutableSuite{c: c}
	return suite
}
package main
import (
"time"
)
// ExpirySuite tests expiry of data items on replica nodes.
type ExpirySuite struct {
	// c holds the shared test configuration (node and client handles).
	c *Config
}
// Name returns the display name of this test suite.
func (t *ExpirySuite) Name() string {
	const suiteName = "Expiry"
	return suiteName
}
// RunTests checks per-replica expiry: an item replicated to nodeA with a 5s
// expiry should disappear from nodeA after 5 seconds while remaining
// available on nodeC, which was created without an expiry.
func (t *ExpirySuite) RunTests() {
	// test expiring data items
	logNodeAction(t.c.nodeC, "Create normal keygroup on nodeC without expiry")
	t.c.nodeC.CreateKeygroup("expirytest", true, 0, false)

	logNodeAction(t.c.nodeC, "Add nodeA as replica with expiry of 5s")
	t.c.nodeC.AddKeygroupReplica("expirytest", t.c.nodeA.ID, 5, false)

	logNodeAction(t.c.nodeC, "Update something on nodeC")
	t.c.nodeC.PutItem("expirytest", "test", "test", false)

	logNodeAction(t.c.nodeA, "Test whether nodeA has received the update. Wait 5s and check that it is not there anymore")
	t.c.nodeA.GetItem("expirytest", "test", false)
	time.Sleep(5 * time.Second)
	// NOTE(review): the final `true` argument presumably marks this call as
	// expected-to-fail (cf. the "should not work" calls in other suites) —
	// confirm in the grpcclient package
	t.c.nodeA.GetItem("expirytest", "test", true)

	logNodeAction(t.c.nodeC, "....the item should still exist with nodeC")
	t.c.nodeC.GetItem("expirytest", "test", false)
}
// NewExpirySuite creates the expiry test suite using the given shared test
// configuration.
func NewExpirySuite(c *Config) *ExpirySuite {
	suite := &ExpirySuite{c: c}
	return suite
}
package main
import (
"fmt"
)
// ImmutableSuite tests immutable (append-only) keygroups: appends, rejected
// updates/deletes, and replication.
type ImmutableSuite struct {
	// c holds the shared test configuration (node and client handles).
	c *Config
}
// Name returns the display name of this test suite.
func (t *ImmutableSuite) Name() string {
	const suiteName = "Immutable"
	return suiteName
}
// RunTests exercises immutable keygroups: appends are expected to receive
// sequential ids, while updates and deletes of existing entries must be
// rejected — also after the keygroup is replicated to another node.
func (t *ImmutableSuite) RunTests() {
	// testing immutable keygroups
	logNodeAction(t.c.nodeB, "Testing immutable keygroups by creating a new immutable keygroup on nodeB")
	t.c.nodeB.CreateKeygroup("log", false, 0, false)

	logNodeAction(t.c.nodeB, "Creating an item in this keygroup")
	res := t.c.nodeB.AppendItem("log", 0, "value1", false)
	// the first append is expected to be stored under id "0"
	if res != "0" {
		logNodeFailure(t.c.nodeB, "0", res)
	}

	logNodeAction(t.c.nodeB, "Updating an item in this keygroup")
	// NOTE(review): the final `true` argument presumably marks this call as
	// expected-to-fail — confirm in the grpcclient package
	t.c.nodeB.PutItem("log", res, "value-2", true)

	logNodeAction(t.c.nodeB, "Getting updated item from immutable keygroup")
	respB, _ := t.c.nodeB.GetItem("log", res, false)
	// the update above must not have gone through: still exactly "value1"
	if len(respB) != 1 || respB[0] != "value1" {
		logNodeFailure(t.c.nodeB, "resp is []{\"value1\"}", fmt.Sprintf("%#v", respB))
	}

	logNodeAction(t.c.nodeB, "Deleting an item in immutable keygroup")
	t.c.nodeB.DeleteItem("log", "0", true)

	logNodeAction(t.c.nodeB, "Adding nodeC as replica to immutable keygroup")
	t.c.nodeB.AddKeygroupReplica("log", t.c.nodeC.ID, 0, false)

	logNodeAction(t.c.nodeC, "Updating immutable item on other nodeC")
	t.c.nodeC.PutItem("log", "0", "value-3", true)

	logNodeAction(t.c.nodeC, "Appending another item to readonly log.")
	res = t.c.nodeC.AppendItem("log", 1, "value-4", false)
	// the second append is expected to be stored under id "1"
	if res != "1" {
		logNodeFailure(t.c.nodeC, "1", res)
	}

	logNodeAction(t.c.nodeC, "Appending another item to readonly log with a previous ID.")
	// appending with an already-used id is expected to fail
	_ = t.c.nodeC.AppendItem("log", 1, "value-5", true)
}
// NewImmutableSuite creates the immutable-keygroup test suite using the given
// shared test configuration.
func NewImmutableSuite(c *Config) *ImmutableSuite {
	suite := &ImmutableSuite{c: c}
	return suite
}
package main
import (
"bufio"
"flag"
"math/rand"
"os"
"strconv"
"strings"
"time"
"git.tu-berlin.de/mcc-fred/fred/tests/3NodeTest/pkg/grpcclient"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var (
	// Wait for the user to press enter to continue
	waitUser *bool
	// waitTime — presumably the pause between test steps when not waiting
	// for user input; set from flags elsewhere in this file (TODO confirm)
	waitTime *time.Duration
	// reader consumes stdin for the interactive "press enter" stepping
	reader = bufio.NewReader(os.Stdin)
	// currSuite — presumably the index of the suite currently being run;
	// used elsewhere in this file (TODO confirm)
	currSuite int
	// failures — presumably maps a suite index to its failure count;
	// populated elsewhere in this file (TODO confirm)
	failures map[int]int
)
// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-go#31832326
// removing capital letters so we can get more conflicts
const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// randStringBytes returns a random string of length n drawn from the
// lowercase alphabet in letterBytes.
func randStringBytes(n int) string {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return string(out)
}