Skip to content

Commit

Permalink
sdn: disable hostport handling when CRIO is used
Browse files Browse the repository at this point in the history
This is a workaround because CRIO's CNI driver has different
hostport behavior than Kubernetes' CNI driver.  Kube leaves all
hostport handling to the CNI plugin itself, while CRIO does
hostport handling internally, leading to duplicate work if the
plugin also handles hostports.

For now, detect the runtime based on socket path and disable
openshift-sdn hostport handling if the runtime is CRIO.

The real fix for this is to add hostport handling to Kube's CNI
driver, but in the "split" mode discussed upstream where kube's
CNI driver handles the port reservations on the host, while the
plugin handles the actual iptables rules.  CRIO should be converted
to this scheme as well, and plugins will indicate with capabilities
in the CNI JSON whether they support this scheme or not.  At
that point we can remove this hack and just have openshift-sdn
advertise portmapping support via the CNI JSON.
  • Loading branch information
dcbw committed Sep 20, 2017
1 parent 9bee56c commit 59b2b62
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 9 deletions.
8 changes: 8 additions & 0 deletions pkg/cmd/server/kubernetes/network/sdn_linux.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
package network

import (
"strings"

"k8s.io/kubernetes/pkg/apis/componentconfig"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kinternalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
Expand All @@ -22,6 +24,11 @@ func NewSDNInterfaces(options configapi.NodeConfig, originClient *osclient.Clien
}
}

// dockershim + kube CNI driver delegates hostport handling to plugins,
// while CRI-O handles hostports itself. Thus we need to disable the
// SDN's hostport handling when run under CRI-O.
enableHostports := !strings.Contains(runtimeEndpoint, "crio")

node, err := sdnnode.New(&sdnnode.OsdnNodeConfig{
PluginName: options.NetworkConfig.NetworkPluginName,
Hostname: options.NodeName,
Expand All @@ -33,6 +40,7 @@ func NewSDNInterfaces(options configapi.NodeConfig, originClient *osclient.Clien
KubeInformers: internalKubeInformers,
IPTablesSyncPeriod: proxyconfig.IPTables.SyncPeriod.Duration,
ProxyMode: proxyconfig.Mode,
EnableHostports: enableHostports,
})
if err != nil {
return nil, nil, err
Expand Down
3 changes: 2 additions & 1 deletion pkg/network/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ type OsdnNodeConfig struct {
SelfIP string
RuntimeEndpoint string
MTU uint32
EnableHostports bool

OSClient *osclient.Client
KClient kclientset.Interface
Expand Down Expand Up @@ -178,7 +179,7 @@ func New(c *OsdnNodeConfig) (network.NodeInterface, error) {
kClient: c.KClient,
osClient: c.OSClient,
oc: oc,
podManager: newPodManager(c.KClient, policy, c.MTU, oc),
podManager: newPodManager(c.KClient, policy, c.MTU, oc, c.EnableHostports),
localIP: c.SelfIP,
hostName: c.Hostname,
useConnTrack: useConnTrack,
Expand Down
27 changes: 19 additions & 8 deletions pkg/network/node/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,20 +67,23 @@ type podManager struct {
mtu uint32
ovs *ovsController

enableHostports bool

// Things only accessed through the processCNIRequests() goroutine
// and thus can be set from Start()
ipamConfig []byte
hostportSyncer kubehostport.HostportSyncer
}

// Creates a new live podManager; used by node code
func newPodManager(kClient kclientset.Interface, policy osdnPolicy, mtu uint32, ovs *ovsController) *podManager {
func newPodManager(kClient kclientset.Interface, policy osdnPolicy, mtu uint32, ovs *ovsController, enableHostports bool) *podManager {
pm := newDefaultPodManager()
pm.kClient = kClient
pm.policy = policy
pm.mtu = mtu
pm.podHandler = pm
pm.ovs = ovs
pm.enableHostports = enableHostports
return pm
}

Expand Down Expand Up @@ -150,7 +153,9 @@ func getIPAMConfig(clusterNetwork *net.IPNet, localSubnet string) ([]byte, error

// Start the CNI server and start processing requests from it
func (m *podManager) Start(socketPath string, localSubnetCIDR string, clusterNetwork *net.IPNet) error {
m.hostportSyncer = kubehostport.NewHostportSyncer()
if m.enableHostports {
m.hostportSyncer = kubehostport.NewHostportSyncer()
}

var err error
if m.ipamConfig, err = getIPAMConfig(clusterNetwork, localSubnetCIDR); err != nil {
Expand Down Expand Up @@ -499,8 +504,10 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
defer func() {
if !success {
m.ipamDel(req.SandboxID)
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
glog.Warningf("failed syncing hostports: %v", err)
if m.hostportSyncer != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
glog.Warningf("failed syncing hostports: %v", err)
}
}
}
}()
Expand All @@ -511,8 +518,10 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
return nil, nil, err
}
podPortMapping := kubehostport.ConstructPodPortMapping(&v1Pod, podIP)
if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
return nil, nil, err
if m.hostportSyncer != nil {
if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
return nil, nil, err
}
}

var hostVethName, contVethMac string
Expand Down Expand Up @@ -631,8 +640,10 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error {
errList = append(errList, err)
}

if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
errList = append(errList, err)
if m.hostportSyncer != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
errList = append(errList, err)
}
}

return kerrors.NewAggregate(errList)
Expand Down

0 comments on commit 59b2b62

Please sign in to comment.