e2e: fix IPv6, PFCP and vpp process handling issues #180

Open · wants to merge 5 commits into base: master
13 changes: 8 additions & 5 deletions test/e2e/framework/framework.go
@@ -261,21 +261,23 @@ func (f *Framework) ServerIP() net.IP {
return f.VPPCfg.GetNamespaceAddress(f.ServerNSName()).IP
}

func (f *Framework) addIP(nsName string, count *uint32) net.IP {
func (f *Framework) addIP(nsName string, count *uint32, ip6startFromByte int) net.IP {
mainAddr := f.VPPCfg.GetNamespaceAddress(nsName)
ipNet := &net.IPNet{
Mask: mainAddr.Mask,
}
startFromByte := 3
if ip4 := mainAddr.IP.To4(); ip4 != nil {
ipNet.IP = make(net.IP, net.IPv4len)
copy(ipNet.IP, ip4)
} else {
ipNet.IP = make(net.IP, net.IPv6len)
copy(ipNet.IP, mainAddr.IP)
startFromByte = ip6startFromByte
}
*count++
n := *count
for p := len(ipNet.IP) - 1; p >= 0 && n > 0; p-- {
for p := startFromByte; p >= 0 && n > 0; p-- {
n += uint32(ipNet.IP[p])
ipNet.IP[p] = byte(n)
n >>= 8
@@ -292,15 +294,16 @@ func (f *Framework) addIP(nsName string, count *uint32) net.IP {
}

func (f *Framework) AddCNodeIP() net.IP {
return f.addIP("cp", &f.numExtraCNodeIPs)
return f.addIP("cp", &f.numExtraCNodeIPs, 15)
}

func (f *Framework) AddUEIP() net.IP {
return f.addIP("ue", &f.numExtraUEIPs)
// UE IPs are /64
return f.addIP("ue", &f.numExtraUEIPs, 7)
}

func (f *Framework) AddServerIP() net.IP {
return f.addIP(f.ServerNSName(), &f.numExtraServerIPs)
return f.addIP(f.ServerNSName(), &f.numExtraServerIPs, 15)
}

// SlowGTPU returns true if UPG runs in PGW mode, and userspace GTP-U
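The framework.go change above fixes how extra IPv6 addresses are allocated: the carry loop used to start at the last byte of the address for both families, while the callers now choose the byte at which the increment begins, byte 15 (the last byte) for extra control-plane and server addresses and byte 7 for UE addresses, so each extra UE address lands in its own /64. Below is a self-contained sketch of the same carry logic; nextIP is an illustrative name, not the framework API.

package main

import (
	"fmt"
	"net"
)

// nextIP returns base incremented by count, carrying from startByte
// toward byte 0. For IPv4 addresses startByte is 3 (the last byte);
// for IPv6 it depends on the prefix, e.g. 7 to step through /64
// networks or 15 to step through host addresses.
func nextIP(base net.IP, count uint32, startByte int) net.IP {
	ip := make(net.IP, len(base))
	copy(ip, base)
	n := count
	for p := startByte; p >= 0 && n > 0; p-- {
		n += uint32(ip[p])
		ip[p] = byte(n)
		n >>= 8
	}
	return ip
}

func main() {
	fmt.Println(nextIP(net.ParseIP("10.0.0.1").To4(), 1, 3)) // 10.0.0.2
	fmt.Println(nextIP(net.ParseIP("2001:db8::1"), 1, 15))   // 2001:db8::2
	fmt.Println(nextIP(net.ParseIP("2001:db8:1::"), 1, 7))   // 2001:db8:1:1::
}
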
2 changes: 2 additions & 0 deletions test/e2e/pfcp/pfcp.go
@@ -894,6 +894,8 @@ func (pc *PFCPConnection) Start(ctx context.Context) error {
// broken in PFCPConnection code
tch := time.After(pc.cfg.RequestTimeout * (maxRequestAttempts + 3))
select {
case <-pc.t.Dead():
return pc.t.Wait()
case r := <-pc.startCh:
close(pc.startCh)
return r.err
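The pfcp.go change adds a third arm to the select in PFCPConnection.Start: if the connection's tomb dies before a start result arrives, Start returns the tomb's error immediately instead of blocking until the request timeout expires. A minimal sketch of the pattern, assuming the tomb comes from gopkg.in/tomb.v2; waitForStart and the channels are illustrative, not the PFCP API.

package main

import (
	"errors"
	"fmt"
	"time"

	"gopkg.in/tomb.v2"
)

// waitForStart waits for a start notification, but also gives up as soon
// as the tomb's goroutines have died, or after a timeout.
func waitForStart(t *tomb.Tomb, startCh chan error, timeout time.Duration) error {
	select {
	case <-t.Dead():
		// all tracked goroutines are gone; report why
		return t.Wait()
	case err := <-startCh:
		return err
	case <-time.After(timeout):
		return errors.New("timed out waiting for start")
	}
}

func main() {
	var t tomb.Tomb
	startCh := make(chan error, 1)
	t.Go(func() error {
		// simulate a worker failing before it can signal a successful start
		return errors.New("association setup failed")
	})
	fmt.Println(waitForStart(&t, startCh, time.Second)) // association setup failed
}
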
9 changes: 7 additions & 2 deletions test/e2e/upg_e2e.go
@@ -824,9 +824,14 @@ const leakTestNumSessions = 10000
const leakTestNumIterations = 3

var _ = ginkgo.Describe("Multiple PFCP Sessions", func() {
ginkgo.Context("[IPv4]", func() { describeMultipleSessions(framework.UPGIPModeV4) })
ginkgo.Context("[IPv6]", func() { describeMultipleSessions(framework.UPGIPModeV6) })
})

func describeMultipleSessions(ipMode framework.UPGIPMode) {
ginkgo.Context("[TDF]", func() {
// FIXME: these tests may crash UPG in UPGIPModeV6 (bad PFCP requests)
f := framework.NewDefaultFramework(framework.UPGModeTDF, framework.UPGIPModeV4)
f := framework.NewDefaultFramework(framework.UPGModeTDF, ipMode)
ginkgo.It("should not leak memory", func() {
ginkgo.By("starting memory trace")
_, err := f.VPP.Ctl("memory-trace main-heap on")
@@ -1045,7 +1050,7 @@ var _ = ginkgo.Describe("Multiple PFCP Sessions", func() {
deleteSession(f, seid1, true)
})
})
})
}

func describeMTU(mode framework.UPGMode, ipMode framework.UPGIPMode) {
ginkgo.Describe("[MTU corner cases]", func() {
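The upg_e2e.go change is structural: the Describe body that hard-coded UPGIPModeV4 moves into describeMultipleSessions(ipMode), and the top-level Describe now registers one Context per IP mode, so the same session specs run under both IPv4 and IPv6. A minimal Ginkgo v1 sketch of that pattern, with illustrative names:

package e2e_test

import "github.com/onsi/ginkgo"

type ipMode int

const (
	ipModeV4 ipMode = iota
	ipModeV6
)

var _ = ginkgo.Describe("Multiple PFCP Sessions", func() {
	ginkgo.Context("[IPv4]", func() { describeSessions(ipModeV4) })
	ginkgo.Context("[IPv6]", func() { describeSessions(ipModeV6) })
})

// describeSessions registers the shared specs once per IP mode.
func describeSessions(mode ipMode) {
	ginkgo.It("should not leak memory", func() {
		_ = mode // drive the framework with the given mode here
	})
}
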
71 changes: 56 additions & 15 deletions test/e2e/vpp/vpp.go
@@ -28,8 +28,10 @@ import (
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"

"github.com/pkg/errors"
@@ -192,7 +194,8 @@ func (vi *VPPInstance) prepareCommand() (*exec.Cmd, error) {
return nil, errors.Wrap(err, "error writing startup file")
}

args := []string{"--net=" + vi.vppNS.Path()}
// -F argument for nsenter specifies that it shouldn't fork
args := []string{"-F", "--net=" + vi.vppNS.Path()}
if vi.startupCfg.UseGDB {
gdbCmdsFile, err := vi.writeVPPFile("gdbcmds", "r\nbt full 30\n")
if err != nil {
@@ -227,10 +230,16 @@ func (vi *VPPInstance) StartVPP() error {
return errors.Wrap(err, "StderrPipe")
}

sigchldCh := make(chan os.Signal, 1)
signal.Notify(sigchldCh, unix.SIGCHLD)
if err := vi.cmd.Start(); err != nil {
return errors.Wrapf(err, "error starting vpp (%q)", vi.startupCfg.BinaryPath)
// never leave any VPP processes alive when e2e test suite exits
vi.cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGTERM,
}

startErrCh := make(chan error)
vi.t.Go(func() error { return vi.run(startErrCh) })
if err := <-startErrCh; err != nil {
<-vi.t.Dead()
return err
}

pid := vi.cmd.Process.Pid
@@ -260,12 +269,30 @@ func (vi *VPPInstance) StartVPP() error {
case core.Connected:
break
case core.Disconnected:
return errors.New("socket disconnected")
err := errors.New("socket disconnected")
vi.t.Kill(err)
return err
case core.Failed:
return errors.Wrap(e.Error, "error connecting to VPP")
err := errors.New("error connecting to VPP")
vi.t.Kill(err)
return err
}
}

go func() {
for {
select {
case <-vi.t.Dead():
return
case e := <-conev:
if e.State == core.Failed {
vi.t.Kill(errors.New("VPP API connection failed"))
return
}
}
}
}()

vi.conn = conn
vi.apiChannel, err = conn.NewAPIChannel()
if err != nil {
@@ -276,18 +303,36 @@
}
vi.apiChannel.SetReplyTimeout(VPP_REPLY_TIMEOUT)

vi.t.Go(func() error { return vi.run(sigchldCh, conev) })

return nil
}

func (vi *VPPInstance) killVPP() {
vi.cmd.Process.Kill()
if vi.cmd.Process != nil {
vi.cmd.Process.Kill()
}
vi.pipeCopyWG.Wait()
vi.cmd.Wait()
}

func (vi *VPPInstance) run(sigchldCh chan os.Signal, conev chan core.ConnectionEvent) error {
func (vi *VPPInstance) run(startErrCh chan error) error {
// Never leave any VPP processes alive when e2e test suite exits
// Note: https://github.com/golang/go/issues/27505
// Must start the process in this goroutine and run runtime.LockOSThread()
// so that the Go thread doesn't exit and thus doesn't take the VPP process with
// it
runtime.LockOSThread()
vi.cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGTERM,
}

sigchldCh := make(chan os.Signal, 1)
signal.Notify(sigchldCh, unix.SIGCHLD)
err := vi.cmd.Start()
startErrCh <- err
if err != nil {
return errors.Wrapf(err, "error starting vpp (%q)", vi.startupCfg.BinaryPath)
}

pid := vi.cmd.Process.Pid
defer signal.Stop(sigchldCh)
for {
@@ -309,10 +354,6 @@ func (vi *VPPInstance) run(sigchldCh chan os.Signal, conev chan core.ConnectionEvent) error {
vi.log.Info("VPP process has exited")
}
return nil
case e := <-conev:
if e.State == core.Failed {
return errors.New("VPP API connection failed")
}
}
}
}
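The vpp.go changes address two process-handling problems. First, nsenter now gets -F so it does not fork before exec'ing VPP. Second, Pdeathsig: syscall.SIGTERM makes the kernel deliver SIGTERM to VPP if the test process dies, but per golang/go#27505 the death signal is armed against the OS thread that starts the child, so VPP is now started from a goroutine pinned to its thread (the run loop), which also reaps the process by listening for SIGCHLD and forwards VPP API connection failures into the tomb. Below is a minimal, Linux-only sketch of the start-from-a-locked-thread part; startSupervised and the channels are illustrative, and unlike the real code it reaps with cmd.Wait rather than a SIGCHLD loop.

package main

import (
	"log"
	"os/exec"
	"runtime"
	"syscall"
)

// startSupervised starts a child that receives SIGTERM if the parent dies.
// Because Pdeathsig fires when the thread that created the child exits
// (golang/go#27505), the child is started from a goroutine pinned to its
// OS thread, and that goroutine also waits for the child.
func startSupervised(name string, args ...string) (*exec.Cmd, <-chan error, error) {
	cmd := exec.Command(name, args...)
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM}

	started := make(chan error)
	exited := make(chan error, 1)
	go func() {
		// Keep this OS thread alive for as long as the child runs;
		// if it exited early, the death signal could fire prematurely.
		runtime.LockOSThread()
		err := cmd.Start()
		started <- err
		if err != nil {
			return
		}
		exited <- cmd.Wait()
	}()
	if err := <-started; err != nil {
		return nil, nil, err
	}
	return cmd, exited, nil
}

func main() {
	_, exited, err := startSupervised("sleep", "1")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("child exited:", <-exited)
}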