Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

sdn: disable hostports when CRI-O is used #16463

Merged
merged 1 commit into from
Sep 26, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions pkg/cmd/server/kubernetes/network/sdn_linux.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package network

import (
"strings"

"k8s.io/kubernetes/pkg/apis/componentconfig"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kinternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
Expand All @@ -22,6 +24,11 @@ func NewSDNInterfaces(options configapi.NodeConfig, originClient *osclient.Clien
}
}

// dockershim + kube CNI driver delegates hostport handling to plugins,
// while CRI-O handles hostports itself. Thus we need to disable the
// SDN's hostport handling when run under CRI-O.
enableHostports := !strings.Contains(runtimeEndpoint, "crio")

node, err := sdnnode.New(&sdnnode.OsdnNodeConfig{
PluginName: options.NetworkConfig.NetworkPluginName,
Hostname: options.NodeName,
Expand All @@ -33,6 +40,7 @@ func NewSDNInterfaces(options configapi.NodeConfig, originClient *osclient.Clien
KubeInformers: internalKubeInformers,
IPTablesSyncPeriod: proxyconfig.IPTables.SyncPeriod.Duration,
ProxyMode: proxyconfig.Mode,
EnableHostports: enableHostports,
})
if err != nil {
return nil, nil, err
Expand Down
3 changes: 2 additions & 1 deletion pkg/network/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ type OsdnNodeConfig struct {
SelfIP string
RuntimeEndpoint string
MTU uint32
EnableHostports bool

OSClient *osclient.Client
KClient kclientset.Interface
Expand Down Expand Up @@ -178,7 +179,7 @@ func New(c *OsdnNodeConfig) (network.NodeInterface, error) {
kClient: c.KClient,
osClient: c.OSClient,
oc: oc,
podManager: newPodManager(c.KClient, policy, c.MTU, oc),
podManager: newPodManager(c.KClient, policy, c.MTU, oc, c.EnableHostports),
localIP: c.SelfIP,
hostName: c.Hostname,
useConnTrack: useConnTrack,
Expand Down
27 changes: 19 additions & 8 deletions pkg/network/node/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,20 +67,23 @@ type podManager struct {
mtu uint32
ovs *ovsController

enableHostports bool

// Things only accessed through the processCNIRequests() goroutine
// and thus can be set from Start()
ipamConfig []byte
hostportSyncer kubehostport.HostportSyncer
}

// Creates a new live podManager; used by node code
func newPodManager(kClient kclientset.Interface, policy osdnPolicy, mtu uint32, ovs *ovsController) *podManager {
func newPodManager(kClient kclientset.Interface, policy osdnPolicy, mtu uint32, ovs *ovsController, enableHostports bool) *podManager {
	// Start from the default manager and layer the live configuration on top.
	manager := newDefaultPodManager()
	manager.kClient = kClient
	manager.policy = policy
	manager.mtu = mtu
	manager.ovs = ovs
	manager.enableHostports = enableHostports
	// The live manager handles its own pod requests.
	manager.podHandler = manager
	return manager
}

Expand Down Expand Up @@ -150,7 +153,9 @@ func getIPAMConfig(clusterNetwork *net.IPNet, localSubnet string) ([]byte, error

// Start the CNI server and start processing requests from it
func (m *podManager) Start(socketPath string, localSubnetCIDR string, clusterNetwork *net.IPNet) error {
m.hostportSyncer = kubehostport.NewHostportSyncer()
if m.enableHostports {
m.hostportSyncer = kubehostport.NewHostportSyncer()
}

var err error
if m.ipamConfig, err = getIPAMConfig(clusterNetwork, localSubnetCIDR); err != nil {
Expand Down Expand Up @@ -499,8 +504,10 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
defer func() {
if !success {
m.ipamDel(req.SandboxID)
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
glog.Warningf("failed syncing hostports: %v", err)
if m.hostportSyncer != nil {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

chain of booleans. Can we check for enableHostports here?

if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
glog.Warningf("failed syncing hostports: %v", err)
}
}
}
}()
Expand All @@ -511,8 +518,10 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
return nil, nil, err
}
podPortMapping := kubehostport.ConstructPodPortMapping(&v1Pod, podIP)
if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
return nil, nil, err
if m.hostportSyncer != nil {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same as above.

if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
return nil, nil, err
}
}

var hostVethName, contVethMac string
Expand Down Expand Up @@ -631,8 +640,10 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error {
errList = append(errList, err)
}

if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
errList = append(errList, err)
if m.hostportSyncer != nil {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

as pointed above

if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
errList = append(errList, err)
}
}

return kerrors.NewAggregate(errList)
Expand Down