Fábio Kaiser Rauber
4 years ago
14 changed files with 1084 additions and 0 deletions
@ -0,0 +1,23 @@ |
|||
# Patterns to ignore when building packages. |
|||
# This supports shell glob matching, relative path matching, and |
|||
# negation (prefixed with !). Only one pattern per line. |
|||
.DS_Store |
|||
# Common VCS dirs |
|||
.git/ |
|||
.gitignore |
|||
.bzr/ |
|||
.bzrignore |
|||
.hg/ |
|||
.hgignore |
|||
.svn/ |
|||
# Common backup files |
|||
*.swp |
|||
*.bak |
|||
*.tmp |
|||
*.orig |
|||
*~ |
|||
# Various IDEs |
|||
.project |
|||
.idea/ |
|||
*.tmproj |
|||
.vscode/ |
@ -0,0 +1,13 @@ |
|||
apiVersion: v2 |
|||
name: nsx-ncp-operator |
|||
description: An operator for leveraging NSX as the default container networking solution for a Kubernetes/OpenShift cluster. |
|||
|
|||
type: application |
|||
|
|||
version: 0.1.0 |
|||
|
|||
# This is the version number of the application being deployed. This version number should be |
|||
# incremented each time you make changes to the application. Versions are not expected to |
|||
# follow Semantic Versioning. They should reflect the version the application is using. |
|||
# It is recommended to use it with quotes. |
|||
appVersion: "v3.1.2" |
@ -0,0 +1,75 @@ |
|||
apiVersion: apiextensions.k8s.io/v1beta1 |
|||
kind: CustomResourceDefinition |
|||
metadata: |
|||
name: ncpinstalls.operator.nsx.vmware.com |
|||
spec: |
|||
group: operator.nsx.vmware.com |
|||
names: |
|||
kind: NcpInstall |
|||
listKind: NcpInstallList |
|||
plural: ncpinstalls |
|||
singular: ncpinstall |
|||
scope: Namespaced |
|||
subresources: |
|||
status: {} |
|||
validation: |
|||
openAPIV3Schema: |
|||
description: NcpInstall is the Schema for the ncpinstalls API |
|||
properties: |
|||
apiVersion: |
|||
description: 'APIVersion defines the versioned schema of this representation |
|||
of an object. Servers should convert recognized schemas to the latest |
|||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' |
|||
type: string |
|||
kind: |
|||
description: 'Kind is a string value representing the REST resource this |
|||
object represents. Servers may infer this from the endpoint the client |
|||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' |
|||
type: string |
|||
metadata: |
|||
type: object |
|||
spec: |
|||
description: NcpInstallSpec defines the desired state of NcpInstall |
|||
type: object |
|||
properties: |
|||
ncpReplicas: |
|||
description: the replica numbers of nsx-ncp deployment |
|||
type: integer |
|||
format: int32 |
|||
minimum: 1 |
|||
addNodeTag: |
|||
description: Tag node logical switch port with node name and cluster when set to true, skip tagging when set to false |
|||
type: boolean |
|||
status: |
|||
description: NcpInstallStatus defines the observed state of NcpInstall |
|||
type: object |
|||
properties: |
|||
conditions: |
|||
description: conditions is a list of conditions and their status |
|||
type: array |
|||
items: |
|||
description: It is just the standard condition fields |
|||
type: object |
|||
properties: |
|||
lastTransitionTime: |
|||
description: Last time the condition transitioned from one status to another |
|||
type: string |
|||
format: date-time |
|||
type: |
|||
description: Type of condition |
|||
type: string |
|||
status: |
|||
description: Status of condition, one of 'True', 'False', 'Unknown' |
|||
type: string |
|||
reason: |
|||
description: Brief reason for the condition |
|||
type: string |
|||
message: |
|||
description: Human readable message indicating details |
|||
type: string |
|||
type: object |
|||
version: v1 |
|||
versions: |
|||
- name: v1 |
|||
served: true |
|||
storage: true |
@ -0,0 +1 @@ |
|||
|
@ -0,0 +1,62 @@ |
|||
{{/* |
|||
Expand the name of the chart. |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.name" -}} |
|||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} |
|||
{{- end }} |
|||
|
|||
{{/* |
|||
Create a default fully qualified app name. |
|||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). |
|||
If release name contains chart name it will be used as a full name. |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.fullname" -}} |
|||
{{- if .Values.fullnameOverride }} |
|||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} |
|||
{{- else }} |
|||
{{- $name := default .Chart.Name .Values.nameOverride }} |
|||
{{- if contains $name .Release.Name }} |
|||
{{- .Release.Name | trunc 63 | trimSuffix "-" }} |
|||
{{- else }} |
|||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} |
|||
{{- end }} |
|||
{{- end }} |
|||
{{- end }} |
|||
|
|||
{{/* |
|||
Create chart name and version as used by the chart label. |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.chart" -}} |
|||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} |
|||
{{- end }} |
|||
|
|||
{{/* |
|||
Common labels |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.labels" -}} |
|||
helm.sh/chart: {{ include "nsx-ncp-operator.chart" . }} |
|||
{{ include "nsx-ncp-operator.selectorLabels" . }} |
|||
{{- if .Chart.AppVersion }} |
|||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} |
|||
{{- end }} |
|||
app.kubernetes.io/managed-by: {{ .Release.Service }} |
|||
{{- end }} |
|||
|
|||
{{/* |
|||
Selector labels |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.selectorLabels" -}} |
|||
app.kubernetes.io/name: {{ include "nsx-ncp-operator.name" . }} |
|||
app.kubernetes.io/instance: {{ .Release.Name }} |
|||
{{- end }} |
|||
|
|||
{{/* |
|||
Create the name of the service account to use |
|||
*/}} |
|||
{{- define "nsx-ncp-operator.serviceAccountName" -}} |
|||
{{- if .Values.serviceAccount.create }} |
|||
{{- default (include "nsx-ncp-operator.fullname" .) .Values.serviceAccount.name }} |
|||
{{- else }} |
|||
{{- default "default" .Values.serviceAccount.name }} |
|||
{{- end }} |
|||
{{- end }} |
@ -0,0 +1,689 @@ |
|||
apiVersion: v1 |
|||
kind: ConfigMap |
|||
metadata: |
|||
name: nsx-ncp-operator-config |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
data: |
|||
ncp.ini: | |
|||
|
|||
[vc] |
|||
|
|||
# IpAddress or Hostname of VC |
|||
#vc_endpoint = <None> |
|||
|
|||
# The SSO domain associated with the deployment |
|||
#sso_domain = vsphere.local |
|||
|
|||
# VC API server HTTPS port. |
|||
#https_port = 443 |
|||
|
|||
|
|||
[coe] |
|||
|
|||
# Container orchestrator adaptor to plug in. |
|||
adaptor = kubernetes |
|||
|
|||
# Specify cluster for adaptor. |
|||
cluster = k8scl-one |
|||
|
|||
# Log level for NCP operations |
|||
# Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL |
|||
#loglevel = <None> |
|||
|
|||
# Log level for NSX API client operations |
|||
# Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL |
|||
#nsxlib_loglevel = <None> |
|||
|
|||
# Enable SNAT for all projects in this cluster. Modification of topologies |
|||
# for existing Namespaces is not supported if this option is reset. |
|||
#enable_snat = True |
|||
|
|||
# Option to enable profiling |
|||
#profiling = False |
|||
|
|||
# The interval of reporting performance metrics (0 means disabled) |
|||
#metrics_interval = 0 |
|||
|
|||
# Name of log file for outputting metrics only (if not defined, use default |
|||
# logging facility) |
|||
#metrics_log_file = <None> |
|||
|
|||
# The type of container host node |
|||
# Choices: HOSTVM BAREMETAL CLOUD WCP_WORKER |
|||
#node_type = HOSTVM |
|||
|
|||
# The time in seconds for NCP/nsx_node_agent to recover the connection to |
|||
# NSX manager/container orchestrator adaptor/Hyperbus before exiting. If |
|||
# the value is 0, NCP/nsx_node_agent won't exit automatically when the |
|||
# connection check fails |
|||
#connect_retry_timeout = 0 |
|||
|
|||
|
|||
# Enable system health status report for SHA |
|||
#enable_sha = True |
|||
|
|||
|
|||
[DEFAULT] |
|||
|
|||
# If set to true, the logging level will be set to DEBUG instead of the |
|||
# default INFO level. |
|||
#debug = False |
|||
|
|||
# If set to true, log output to standard error. |
|||
#use_stderr = True |
|||
|
|||
# Destination to send api log to. STDOUT or STDERR for console output. FILE |
|||
# to write log to file configured in "api_log_file". NONE to disable api |
|||
# log. |
|||
# Choices: STDOUT STDERR FILE NONE |
|||
#api_log_output = NONE |
|||
|
|||
# Name of log file to send API access log to. |
|||
#api_log_file = ncp_api_log.txt |
|||
|
|||
# Interval in seconds to logs api call to output configured in |
|||
# api_log_output |
|||
#api_log_interval = 60 |
|||
|
|||
# When api_log_output is not NONE, this option determines if api calls |
|||
# should be collected per NSX cluster or individual NSX manager. |
|||
# Choices: API_LOG_PER_ENDPOINT API_LOG_PER_CLUSTER |
|||
#api_log_mode = API_LOG_PER_ENDPOINT |
|||
|
|||
# If set to true, use syslog for logging. |
|||
#use_syslog = False |
|||
|
|||
# The base directory used for relative log_file paths. |
|||
#log_dir = <None> |
|||
|
|||
# Name of log file to send logging output to. |
|||
#log_file = <None> |
|||
|
|||
# max MB for each compressed file. Defaults to 100 MB. |
|||
#log_rotation_file_max_mb = 100 |
|||
|
|||
# max MB for each compressed file for API logs. Defaults to 10 MB. |
|||
#api_log_rotation_file_max_mb = 10 |
|||
|
|||
# Total number of compressed backup files to store. Defaults to 5. |
|||
#log_rotation_backup_count = 5 |
|||
|
|||
# Total number of compressed backup files to store API logs. Defaults to 5. |
|||
#api_log_rotation_backup_count = 5 |
|||
|
|||
# Log level for the root logger. If debug=True, the default root logger |
|||
# level will be DEBUG regardless of the value of this option. If this |
|||
# option is unset, the default root logger level will be either DEBUG or |
|||
# INFO according to the debug option value |
|||
# Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL |
|||
#loglevel = <None> |
|||
|
|||
|
|||
|
|||
|
|||
[nsx_v3] |
|||
|
|||
# Set NSX API adaptor to NSX Policy API adaptor. If unset, NSX adaptor will |
|||
# be set to the NSX Manager based adaptor. If unset or False, the NSX |
|||
# resource ID in other options can be resource name or UUID |
|||
policy_nsxapi = True |
|||
|
|||
|
|||
|
|||
# Path to NSX client certificate file. If specified, the nsx_api_user and |
|||
# nsx_api_password options will be ignored. Must be specified along with |
|||
# nsx_api_private_key_file option |
|||
#nsx_api_cert_file = <None> |
|||
|
|||
# Path to NSX client private key file. If specified, the nsx_api_user and |
|||
# nsx_api_password options will be ignored. Must be specified along with |
|||
# nsx_api_cert_file option |
|||
#nsx_api_private_key_file = <None> |
|||
|
|||
# IP address of one or more NSX managers separated by commas. The IP |
|||
# address should be of the form: |
|||
# [<scheme>://]<ip_address>[:<port>] |
|||
# If scheme is not provided https is used. If port is not provided port 80 |
|||
# is used for http and port 443 for https. |
|||
#nsx_api_managers = [] |
|||
|
|||
# If True, skip fatal errors when no endpoint in the NSX management cluster |
|||
# is available to serve a request, and retry the request instead |
|||
#cluster_unavailable_retry = False |
|||
|
|||
# Maximum number of times to retry API requests upon stale revision errors. |
|||
#retries = 10 |
|||
|
|||
# Specify one or a list of CA bundle files to use in verifying the NSX |
|||
# Manager server certificate. This option is ignored if "insecure" is set |
|||
# to True. If "insecure" is set to False and "ca_file" is unset, the |
|||
# "thumbprint" will be used. If "thumbprint" is unset, the system root CAs |
|||
# will be used to verify the server certificate. |
|||
#ca_file = [] |
|||
|
|||
# Specify one or a list of thumbprint strings to use in verifying the NSX |
|||
# Manager server certificate. This option is ignored if "insecure" is set |
|||
# to True or "ca_file" is defined. |
|||
#thumbprint = [] |
|||
|
|||
# If true, the NSX Manager server certificate is not verified. If false the |
|||
# CA bundle specified via "ca_file" will be used or if unset the |
|||
# "thumbprint" will be used. If "thumbprint" is unset, the default system |
|||
# root CAs will be used. |
|||
#insecure = False |
|||
|
|||
# The time in seconds before aborting a HTTP connection to a NSX manager. |
|||
#http_timeout = 10 |
|||
|
|||
# The time in seconds before aborting a HTTP read response from a NSX |
|||
# manager. |
|||
#http_read_timeout = 180 |
|||
|
|||
# Maximum number of times to retry a HTTP connection. |
|||
#http_retries = 3 |
|||
|
|||
# Maximum concurrent connections to all NSX managers. If multiple NSX |
|||
# managers are configured, connections will be spread evenly across all |
|||
# managers, rounded down to the nearest integer. Each NSX manager will have |
|||
# at least 1 connection. This value should be a multiple of |
|||
# [nsx_v3]nsx_api_managers length. |
|||
#concurrent_connections = 10 |
|||
|
|||
# The amount of time in seconds to wait before ensuring connectivity to the |
|||
# NSX manager if no manager connection has been used. |
|||
#conn_idle_timeout = 10 |
|||
|
|||
# Number of times a HTTP redirect should be followed. |
|||
#redirects = 2 |
|||
|
|||
# Subnet prefix of IP block. |
|||
#subnet_prefix = 24 |
|||
|
|||
# Subnet prefix for v6 IP blocks |
|||
#v6_subnet_prefix = 64 |
|||
|
|||
|
|||
# Indicates whether distributed firewall DENY rules are logged. |
|||
#log_dropped_traffic = False |
|||
|
|||
# Indicates whether distributed firewall rules are logged. Option 'ALL' |
|||
# will enable logging for all DFW rules (both DENY and ALLOW), and option |
|||
# 'DENY' will enable logging only for DENY rules. Remove this config if no |
|||
# logging is desired |
|||
# Choices: ALL DENY <None> |
|||
#log_firewall_traffic = <None> |
|||
|
|||
|
|||
# Option to use native load balancer or not |
|||
#use_native_loadbalancer = True |
|||
|
|||
|
|||
# Option to auto scale layer 4 load balancer or not. If set to True, NCP |
|||
# will create additional LB when necessary upon K8s Service of type LB |
|||
# creation/update. |
|||
#l4_lb_auto_scaling = True |
|||
|
|||
# Option to use native load balancer or not when ingress class annotation |
|||
# is missing. Only effective if use_native_loadbalancer is set to true |
|||
#default_ingress_class_nsx = True |
|||
|
|||
# Path to the default certificate file for HTTPS load balancing. Must be |
|||
# specified along with lb_priv_key_path option |
|||
#lb_default_cert_path = <None> |
|||
|
|||
# Path to the private key file for default certificate for HTTPS load |
|||
# balancing. Must be specified along with lb_default_cert_path option |
|||
#lb_priv_key_path = <None> |
|||
|
|||
# Option to set load balancing algorithm in load balancer pool object. |
|||
# Choices: ROUND_ROBIN LEAST_CONNECTION IP_HASH WEIGHTED_ROUND_ROBIN |
|||
#pool_algorithm = WEIGHTED_ROUND_ROBIN |
|||
|
|||
# Option to set load balancer service size. MEDIUM Edge VM (4 vCPU, 8GB) |
|||
# only supports SMALL LB. LARGE Edge VM (8 vCPU, 16GB) only supports MEDIUM |
|||
# and SMALL LB. Bare Metal Edge (IvyBridge, 2 socket, 128GB) supports |
|||
# LARGE, MEDIUM and SMALL LB |
|||
# Choices: SMALL MEDIUM LARGE |
|||
#service_size = SMALL |
|||
|
|||
# Option to set load balancer persistence option. If cookie is selected, |
|||
# cookie persistence will be offered. If source_ip is selected, source IP |
|||
# persistence will be offered for ingress traffic through L7 load balancer |
|||
# Choices: <None> cookie source_ip |
|||
#l7_persistence = <None> |
|||
|
|||
# An integer for LoadBalancer side timeout value in seconds on layer 7 |
|||
# persistence profile, if the profile exists. |
|||
#l7_persistence_timeout = 10800 |
|||
|
|||
# Option to set load balancer persistence option. If source_ip is selected, |
|||
# source IP persistence will be offered for ingress traffic through L4 load |
|||
# balancer |
|||
# Choices: <None> source_ip |
|||
#l4_persistence = <None> |
|||
|
|||
# Option to set distributed load balancer source ip persistence option, |
|||
# only available when use_native_dlb = True |
|||
# Choices: <None> source_ip |
|||
#dlb_l4_persistence = <None> |
|||
|
|||
|
|||
# Resource ID of the container ip blocks that will be used for creating |
|||
# subnets for no-SNAT projects. If specified, no-SNAT projects will use |
|||
# these ip blocks ONLY. Otherwise they will use container_ip_blocks |
|||
#no_snat_ip_blocks = [] |
|||
|
|||
# Resource ID of the external ip pools that will be used for allocating IP |
|||
# addresses which will be used for translating container IPs via SNAT |
|||
# rules. If policy_nsxapi is enabled, it also support automatically |
|||
# creating the ip pools. The definition is a comma separated list: |
|||
# CIDR,IP_1-IP_2,... Mixing different formats (e.g. UUID, CIDR&IP_Range) is |
|||
# not supported. |
|||
#external_ip_pools = [] |
|||
|
|||
|
|||
# Resource ID of the container ip blocks that will be used for creating |
|||
# subnets. If name, it must be unique. If policy_nsxapi is enabled, it also |
|||
# support automatically creating the IP blocks. The definition is a comma |
|||
# separated list: CIDR,CIDR,... Mixing different formats (e.g. UUID,CIDR) |
|||
# is not supported. |
|||
#container_ip_blocks = [] |
|||
|
|||
# Resource ID of the top-tier router for the container cluster network, |
|||
# which could be either tier0 or tier1. If policy_nsxapi is enabled, should |
|||
# be ID of a tier0/tier1 gateway. |
|||
#top_tier_router = <None> |
|||
|
|||
# Option to use single-tier router for the container cluster network |
|||
#single_tier_topology = True |
|||
|
|||
# Option to use single-tier router for the container cluster network. Each |
|||
# namespace will have dedicated tier-1 router created. Namespaces with |
|||
# "sr_shared_res: true" annotation will share t1 and lbs. |
|||
#single_tier_sr_topology = False |
|||
|
|||
# Resource ID of the external ip pools that will be used only for |
|||
# allocating IP addresses for Ingress controller and LB service. If |
|||
# policy_nsxapi is enabled, it also supports automatically creating the ip |
|||
# pools. The definition is a comma separated list: CIDR,IP_1-IP_2,... |
|||
# Mixing different formats (e.g. UUID, CIDR&IP_Range) is not supported. |
|||
#external_ip_pools_lb = [] |
|||
|
|||
# Resource ID of the NSX overlay transport zone that will be used for |
|||
# creating logical switches for container networking. It must refer to an |
|||
# already existing resource on NSX and every transport node where VMs |
|||
# hosting containers are deployed must be enabled on this transport zone |
|||
#overlay_tz = <None> |
|||
|
|||
# Name of the enforcement point used to look up overlay transport zones and |
|||
# edge cluster paths, e.g. vmc-enforcementpoint, default, etc. |
|||
#enforcement_point = <None> |
|||
|
|||
# Resource ID of the lb service that can be attached by virtual servers |
|||
#lb_service = <None> |
|||
|
|||
# Resource ID of the IPSet containing the IPs of all the virtual servers |
|||
#lb_vs_ip_set = <None> |
|||
|
|||
# Enable X_forward_for for ingress. Available values are INSERT or REPLACE. |
|||
# When this config is set, if x_forwarded_for is missing, LB will add |
|||
# x_forwarded_for in the request header with value client ip. When |
|||
# x_forwarded_for is present and its set to REPLACE, LB will replace |
|||
# x_forwarded_for in the header to client_ip. When x_forwarded_for is |
|||
# present and its set to INSERT, LB will append client_ip to |
|||
# x_forwarded_for in the header. If not wanting to use x_forwarded_for, |
|||
# remove this config |
|||
# Choices: <None> INSERT REPLACE |
|||
#x_forwarded_for = <None> |
|||
|
|||
|
|||
# Resource ID of the firewall section that will be used to create firewall |
|||
# sections below this mark section |
|||
#top_firewall_section_marker = <None> |
|||
|
|||
# Resource ID of the firewall section that will be used to create firewall |
|||
# sections above this mark section |
|||
#bottom_firewall_section_marker = <None> |
|||
|
|||
# Replication mode of container logical switch, set SOURCE for cloud as it |
|||
# only supports head replication mode |
|||
# Choices: MTEP SOURCE |
|||
#ls_replication_mode = MTEP |
|||
|
|||
|
|||
|
|||
# The resource which NCP will search tag 'node_name' on, to get parent VIF |
|||
# or transport node uuid for container LSP API context field. For HOSTVM |
|||
# mode, it will search tag on LSP. For BM mode, it will search tag on LSP |
|||
# then search TN. For CLOUD mode, it will search tag on VM. For WCP_WORKER |
|||
# mode, it will search TN by hostname. |
|||
# Choices: tag_on_lsp tag_on_tn tag_on_vm hostname_on_tn |
|||
#search_node_tag_on = tag_on_lsp |
|||
|
|||
# Determines which kind of information to be used as VIF app_id. Defaults |
|||
# to pod_resource_key. In WCP mode, pod_uid is used. |
|||
# Choices: pod_resource_key pod_uid |
|||
#vif_app_id_type = pod_resource_key |
|||
|
|||
|
|||
# If this value is not empty, NCP will append it to nameserver list |
|||
#dns_servers = [] |
|||
|
|||
# Set this to True to enable NCP to report errors through NSXError CRD. |
|||
#enable_nsx_err_crd = False |
|||
|
|||
# Maximum number of virtual servers allowed to create in cluster for |
|||
# LoadBalancer type of services. |
|||
#max_allowed_virtual_servers = 9223372036854775807 |
|||
|
|||
# Edge cluster ID needed when creating Tier1 router for loadbalancer |
|||
# service. Information could be retrieved from Tier0 router |
|||
#edge_cluster = <None> |
|||
|
|||
# Inventory feature switch |
|||
#enable_inventory = True |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
# For internal container network CIDR, NCP adds redistribution deny rule to |
|||
# stop T0 router advertise subnets to external network outside of T0 |
|||
# router. If BGP or route redistribution is disabled, or |
|||
# T1_CONNECTED/TIER1_SEGMENT option is not selected, NCP would not add the |
|||
# deny rule. If users enable BGP and route redistribution, or select |
|||
# T1_CONNECTED/TIER1_SEGMENT option after NCP starts, user needs to restart |
|||
# NCP to let NCP set deny rule. If there is already a route map attached, |
|||
# NCP will create IP prefix list on the existing route map. Otherwise NCP |
|||
# will create a new route map and attach it. This option could be used only |
|||
# in SNAT mode and when policy_nsxapi = True. |
|||
#configure_t0_redistribution = False |
|||
|
|||
|
|||
# Health check interval for nsx lb monitor profile |
|||
#lb_hc_profile_interval = 5 |
|||
|
|||
# Health check timeout for nsx lb monitor profile |
|||
#lb_hc_profile_timeout = 15 |
|||
|
|||
# Health check failed count for nsx lb monitor profile. Pool member failed |
|||
# for this amount will be marked as down. |
|||
#lb_hc_profile_fall_count = 3 |
|||
|
|||
# Health check rise count for nsx lb monitor profile. Pool members |
|||
# previously marked down will be brought up, if succeed in the health check |
|||
# for this amount of time. |
|||
#lb_hc_profile_rise_count = 3 |
|||
|
|||
# Maximum size of the buffer used to store HTTP request headers for L7 |
|||
# virtual servers in cluster. A request with header larger than this value |
|||
# will be processed as best effort whereas a request with header below this |
|||
# value is guaranteed to be processed. |
|||
#lb_http_request_header_size = 1024 |
|||
|
|||
# Maximum size of the buffer used to store HTTP response headers for all L7 |
|||
# virtual servers in cluster. A response with header larger than this value |
|||
# will be dropped. |
|||
#lb_http_response_header_size = 4096 |
|||
|
|||
# Maximum server idle time in seconds for L7 virtual servers in cluster. If |
|||
# backend server does not send any packet within this time, the connection |
|||
# is closed. |
|||
#lb_http_response_timeout = 60 |
|||
|
|||
# Determines the behavior when a Tier-1 instance restarts after a failure. |
|||
# If set to PREEMPTIVE, the preferred node will take over, even if it |
|||
# causes another failure. If set to NON_PREEMPTIVE, then the instance that |
|||
# restarted will remain secondary. Applicable to Tier-1 across cluster that |
|||
# was created by NCP and has edge cluster configured. |
|||
# Choices: PREEMPTIVE NON_PREEMPTIVE |
|||
#failover_mode = NON_PREEMPTIVE |
|||
|
|||
# Set this to ENABLE to enable NCP enforced pool member limit for all load |
|||
# balancer servers in cluster. Set this to CRD_LB_ONLY will only enforce |
|||
# the limit for load balancer servers created using lb CRD. Set this to |
|||
# DISABLE to turn off all limit checks. This option requires |
|||
# relax_scale_validation set to True, l4_lb_auto_scaling set to False, and |
|||
# works on Policy API only. When not disabled, NCP will enforce a pool |
|||
# member limit on LBS to prevent one LBS from using up all resources on |
|||
# edge nodes. |
|||
# Choices: DISABLE ENABLE CRD_LB_ONLY |
|||
#ncp_enforced_pool_member_limit = DISABLE |
|||
|
|||
# Maximum number of pool member allowed for each small load balancer |
|||
# service. Requires ncp_enforced_pool_member_limit set to ENABLE or |
|||
# CRD_LB_ONLY to take effect. |
|||
#members_per_small_lbs = 2000 |
|||
|
|||
# Maximum number of pool member allowed for each medium load balancer |
|||
# service. Requires ncp_enforced_pool_member_limit set to ENABLE or |
|||
# CRD_LB_ONLY to take effect. |
|||
#members_per_medium_lbs = 2000 |
|||
|
|||
# Interval in seconds to clean empty segments. |
|||
#segment_gc_interval = 600 |
|||
|
|||
# Determines the mode NCP limits rate when sending API calls to NSX. |
|||
# Choices: NO_LIMIT SLIDING_WINDOW ADAPTIVE_AIMD |
|||
#api_rate_limit_mode = ADAPTIVE_AIMD |
|||
|
|||
# When nsx_v3.api_rate_limit_mode is not set to NO_LIMIT, determines the |
|||
# maximum number of API calls sent per manager ip per second. Should be a |
|||
# positive integer. |
|||
#max_api_rate = 40 |
|||
|
|||
# Resource ID of the client SSL profile which will be used by Loadbalancer |
|||
# while participating in TLS handshake with the client |
|||
#client_ssl_profile = <None> |
|||
|
|||
# Enable security policy notification. If this option is enabled, NCP will |
|||
# configure container network after NSX creates logical port and finishes |
|||
# security policy synchronization |
|||
#wait_for_security_policy_sync = False |
|||
|
|||
|
|||
# Set this to True to enable rule tag as cluster name in DFW logs for k8s |
|||
#enable_rule_tag = True |
|||
|
|||
|
|||
[ha] |
|||
|
|||
|
|||
# Time duration in seconds of mastership timeout. NCP instance will remain |
|||
# master for this duration after elected. Note that the heartbeat period |
|||
# plus the update timeout must not be greater than this period. This is |
|||
# done to ensure that the master instance will either confirm liveness or |
|||
# fail before the timeout. |
|||
#master_timeout = 18 |
|||
|
|||
# Time in seconds between heartbeats for elected leader. Once an NCP |
|||
# instance is elected master, it will periodically confirm liveness based |
|||
# on this value. |
|||
#heartbeat_period = 6 |
|||
|
|||
# Timeout duration in seconds for update to election resource. The default |
|||
# value is calculated by subtracting heartbeat period from master timeout. |
|||
# If the update request does not complete before the timeout it will be |
|||
# aborted. Used for master heartbeats to ensure that the update finishes or |
|||
# is aborted before the master timeout occurs. |
|||
#update_timeout = <None> |
|||
|
|||
|
|||
[k8s] |
|||
|
|||
# Kubernetes API server IP address. |
|||
#apiserver_host_ip = <None> |
|||
|
|||
# Kubernetes API server port. |
|||
#apiserver_host_port = <None> |
|||
|
|||
# Full path of the Token file to use for authenticating with the k8s API |
|||
# server. |
|||
client_token_file = /var/run/secrets/kubernetes.io/serviceaccount/token |
|||
|
|||
# Full path of the client certificate file to use for authenticating with |
|||
# the k8s API server. It must be specified together with |
|||
# "client_private_key_file". |
|||
#client_cert_file = <None> |
|||
|
|||
# Full path of the client private key file to use for authenticating with |
|||
# the k8s API server. It must be specified together with |
|||
# "client_cert_file". |
|||
#client_private_key_file = <None> |
|||
|
|||
# Specify a CA bundle file to use in verifying the k8s API server |
|||
# certificate. |
|||
ca_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt |
|||
|
|||
# Specify whether ingress controllers are expected to be deployed in |
|||
# hostnetwork mode or as regular pods externally accessed via NAT |
|||
# Choices: hostnetwork nat |
|||
#ingress_mode = hostnetwork |
|||
|
|||
# Log level for the kubernetes adaptor. Ignored if debug is True |
|||
# Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL |
|||
#loglevel = <None> |
|||
|
|||
# The default HTTP ingress port for non-NSX ingress controllers in NAT |
|||
# mode. |
|||
#http_ingress_port = 80 |
|||
|
|||
# The default HTTPS ingress port for non-NSX ingress controllers in NAT |
|||
# mode. |
|||
#https_ingress_port = 443 |
|||
|
|||
|
|||
# Specify thread pool size to process resource events |
|||
#resource_watcher_thread_pool_size = 1 |
|||
|
|||
# User specified IP address for HTTP and HTTPS ingresses |
|||
#http_and_https_ingress_ip = <None> |
|||
|
|||
# Set this to True to enable NCP to create tier1 router, first segment and |
|||
# default SNAT IP for VirtualNetwork CRD, and then create segment port for |
|||
# VM through VirtualNetworkInterface CRD. |
|||
#enable_vnet_crd = False |
|||
|
|||
# Set this to True to enable NCP to create LoadBalancer on a Tier-1 for |
|||
# LoadBalancer CRD. This option does not support LB autoscaling. |
|||
#enable_lb_crd = False |
|||
|
|||
# Option to set the type of baseline cluster policy. ALLOW_CLUSTER creates |
|||
# an explicit baseline policy to allow any pod to communicate any other pod |
|||
# within the cluster. ALLOW_NAMESPACE creates an explicit baseline policy |
|||
# to allow pods within the same namespace to communicate with each other. |
|||
# By default, no baseline rule will be created and the cluster will assume |
|||
# the default behavior as specified by the backend. Modification is not |
|||
# supported after the value is set. |
|||
# Choices: <None> allow_cluster allow_namespace |
|||
#baseline_policy_type = <None> |
|||
|
|||
# Maximum number of endpoints allowed to create for a service. |
|||
#max_allowed_endpoints = 1000 |
|||
|
|||
# Set this to True to enable NCP reporting NSX backend error to k8s object |
|||
# using k8s event |
|||
#enable_ncp_event = False |
|||
|
|||
# Set this to True to enable multus to create multiple interfaces for one |
|||
# pod. If passthrough interface is used as additional interface, user should |
|||
# deploy device plugin to provide device allocation information for NCP. Pod |
|||
# annotations under prefix "k8s.v1.cni.cncf.io" cannot be modified after |
|||
# pod realized. User defined IP will not be allocated from Segment IPPool. |
|||
# "gateway" in NetworkAttachmentDefinition is not used to configure |
|||
# secondary interfaces. Default gateway of pod is configured by primary |
|||
# interface. User must define IP and/or MAC if no "ipam" is configured. Only |
|||
# available if node type is HOSTVM |
|||
#enable_multus = True |
|||
|
|||
|
|||
# Interval of polling loadbalancer statistics. Defaults to 60 seconds. |
|||
#lb_statistic_monitor_interval = 60 |
|||
|
|||
# This option is for toggling process of network CRD. It should be set to |
|||
# False when the network status setting is done by OCP4 NetworkOperator |
|||
#process_oc_network = True |
|||
|
|||
|
|||
|
|||
#[nsx_v3] |
|||
# Deprecated option: tier0_router |
|||
# Replaced by [nsx_v3] top_tier_router |
|||
|
|||
# Deprecated option: deny_subnets_redistribution |
|||
# Replaced by [nsx_v3] configure_t0_redistribution |
|||
|
|||
[nsx_kube_proxy] |
|||
|
|||
# The way to process service configuration, set into OVS flow or write to |
|||
# nestdb.
|||
# Choices: ovs nestdb |
|||
#config_handler = ovs |
|||
|
|||
|
|||
|
|||
|
|||
[nsx_node_agent] |
|||
|
|||
# Prefix of node /proc and /var/run/netns path to mount on nsx_node_agent |
|||
# DaemonSet |
|||
#proc_mount_path_prefix = /host |
|||
|
|||
|
|||
|
|||
|
|||
# The log level of NSX RPC library |
|||
# Choices: NOTSET DEBUG INFO WARNING ERROR CRITICAL |
|||
#nsxrpc_loglevel = ERROR |
|||
|
|||
# OVS bridge name |
|||
ovs_bridge = br-int |
|||
|
|||
# The time in seconds for nsx_node_agent to wait CIF config from HyperBus |
|||
# before returning to CNI |
|||
#config_retry_timeout = 300 |
|||
|
|||
# The time in seconds for nsx_node_agent to backoff before re-using an |
|||
# existing cached CIF to serve CNI request. Must be less than |
|||
# config_retry_timeout. |
|||
#config_reuse_backoff_time = 15 |
|||
|
|||
|
|||
# The OVS uplink OpenFlow port where to apply the NAT rules to. |
|||
ovs_uplink_port = eth0 |
|||
|
|||
# Set this to True if you want to install and use the NSX-OVS kernel |
|||
# module. If the host OS is supported, it will be installed by nsx-ncp- |
|||
# bootstrap and used by nsx-ovs container in nsx-node-agent pod. Note that |
|||
# you would have to add (uncomment) the volumes and mounts in the nsx-ncp- |
|||
# bootstrap DS and add SYS_MODULE capability in nsx-ovs container spec in |
|||
# nsx-node-agent DS. Failing to do so will result in failure of |
|||
# installation and/or kernel upgrade of NSX-OVS kernel module.
|||
#use_nsx_ovs_kernel_module = False |
|||
|
|||
# The time in seconds for nsx_node_agent to call OVS command. Please |
|||
# increase the time if OVS is in heavy load to create/delete ports |
|||
#ovs_operation_timeout = 5 |
|||
|
|||
# Set to true to allow the CNI plugin to enable IPv6 container interfaces |
|||
#enable_ipv6 = False |
|||
|
|||
# Set to True if DHCP is configured on the "ovs_uplink_port". "auto" will |
|||
# try to automatically infer it but it only works on CoreOS. On other |
|||
# types host OS, it defaults to False |
|||
# Choices: True False auto |
|||
#is_dhcp_configured_on_ovs_uplink_port = auto |
|||
|
|||
# The MTU value for nsx-cni |
|||
#mtu = 1500 |
|||
|
|||
# Applicable only in PKS. If set, nsx-node-agent watches for addition, |
|||
# removal, and update of nodelocaldns DaemonSet. Upon a change, it |
|||
# terminates and is restarted by the monit agent |
|||
#enable_nodelocaldns_monitoring = False |
|||
|
|||
# The waiting time before nsx-node-agent returns response to CNI plugin, |
|||
# there is a potential timing issue between port creation and related |
|||
# firewall config update on Hypervisor host |
|||
#waiting_before_cni_response = 0 |
@ -0,0 +1,8 @@ |
|||
apiVersion: v1 |
|||
kind: Secret |
|||
type: kubernetes.io/tls |
|||
metadata: |
|||
name: lb-secret |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
data: {tls.crt: "", tls.key: ""} |
@ -0,0 +1,9 @@ |
|||
apiVersion: operator.nsx.vmware.com/v1 |
|||
kind: NcpInstall |
|||
metadata: |
|||
name: {{ include "nsx-ncp-operator.fullname" . }}-ncpinstall |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
spec: |
|||
ncpReplicas: 1 |
|||
addNodeTag: false |
@ -0,0 +1,8 @@ |
|||
apiVersion: v1 |
|||
data: {tls.crt: "", tls.key: "", tls.ca: ""} |
|||
kind: Secret |
|||
metadata: |
|||
name: nsx-secret |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
type: kubernetes.io/tls |
@ -0,0 +1,63 @@ |
|||
apiVersion: apps/v1 |
|||
kind: Deployment |
|||
metadata: |
|||
name: {{ include "nsx-ncp-operator.fullname" . }} |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
spec: |
|||
replicas: 1 |
|||
selector: |
|||
matchLabels: |
|||
{{- include "nsx-ncp-operator.selectorLabels" . | nindent 6 }} |
|||
template: |
|||
metadata: |
|||
{{- with .Values.podAnnotations }} |
|||
annotations: |
|||
{{- toYaml . | nindent 8 }} |
|||
{{- end }} |
|||
labels: |
|||
{{- include "nsx-ncp-operator.selectorLabels" . | nindent 8 }} |
|||
spec: |
|||
{{- with .Values.imagePullSecrets }} |
|||
imagePullSecrets: |
|||
{{- toYaml . | nindent 8 }} |
|||
{{- end }} |
|||
hostNetwork: true |
|||
serviceAccountName: {{ include "nsx-ncp-operator.serviceAccountName" . }} |
|||
volumes: |
|||
- hostPath: {path: /etc/os-release} |
|||
name: host-os-release |
|||
containers: |
|||
- name: nsx-ncp-operator |
|||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" |
|||
securityContext: |
|||
{{- toYaml .Values.securityContext | nindent 12 }} |
|||
command: ["/bin/bash", "-c", "nsx-ncp-operator --zap-time-encoding=iso8601"] |
|||
volumeMounts: |
|||
- {mountPath: /host/etc/os-release, name: host-os-release} |
|||
imagePullPolicy: {{ .Values.image.pullPolicy }} |
|||
env: |
|||
- name: POD_NAME |
|||
valueFrom: |
|||
fieldRef: |
|||
fieldPath: metadata.name |
|||
- name: OPERATOR_NAME |
|||
value: "nsx-ncp-operator" |
|||
- name: NCP_IMAGE |
|||
value: "nsx-ncp:latest" |
|||
- name: WATCH_NAMESPACE |
|||
value: "{{ .Release.Namespace }}" |
|||
resources: |
|||
{{- toYaml .Values.resources | nindent 12 }} |
|||
{{- with .Values.nodeSelector }} |
|||
nodeSelector: |
|||
{{- toYaml . | nindent 8 }} |
|||
{{- end }} |
|||
{{- with .Values.affinity }} |
|||
affinity: |
|||
{{- toYaml . | nindent 8 }} |
|||
{{- end }} |
|||
{{- with .Values.tolerations }} |
|||
tolerations: |
|||
{{- toYaml . | nindent 8 }} |
|||
{{- end }} |
@ -0,0 +1,48 @@ |
|||
kind: ClusterRole |
|||
apiVersion: rbac.authorization.k8s.io/v1 |
|||
metadata: |
|||
name: nsx-ncp-operator |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
rules: |
|||
- apiGroups: [''] |
|||
resources: [pods, pods/log, pods/exec, configmaps, namespaces, serviceaccounts, secrets, nodes/status] |
|||
verbs: [create, get, list, patch, delete, update, watch, deletecollection] |
|||
- apiGroups: [apps] |
|||
resources: [deployments, daemonsets] |
|||
verbs: [create, get, list, patch, delete, update, watch] |
|||
- apiGroups: [rbac.authorization.k8s.io] |
|||
resources: [clusterroles, clusterrolebindings, roles, rolebindings] |
|||
verbs: [create, get, list, patch, update, watch, delete] |
|||
- apiGroups: [operator.nsx.vmware.com] |
|||
resources: [ncpinstalls, ncpinstalls/status] |
|||
verbs: [get, list, watch, patch, update] |
|||
# Required by nsx-node-agent |
|||
- apiGroups: [''] |
|||
resources: [endpoints, services] |
|||
verbs: [get, list, watch] |
|||
# Required by nsx-ncp |
|||
- apiGroups: ['', extensions, networking.k8s.io] |
|||
resources: [namespaces, ingresses, services, pods, networkpolicies, routes] |
|||
verbs: [get, watch, list, update, patch] |
|||
- apiGroups: [nsx.vmware.com] |
|||
resources: [nsxerrors, nsxlocks, ncpconfigs] |
|||
verbs: [create, get, list, patch, delete, update] |
|||
- apiGroups: ['', extensions, networking.k8s.io] |
|||
resources: [ingresses/status, services/status, routes/status] |
|||
verbs: [replace, update, patch] |
|||
- apiGroups: [k8s.cni.cncf.io] |
|||
resources: [network-attachment-definitions] |
|||
verbs: [get, list, watch] |
|||
- apiGroups: [apiextensions.k8s.io] |
|||
resources: [customresourcedefinitions] |
|||
verbs: [create, get, list, patch, update, watch, delete] |
|||
- apiGroups: ['', extensions, networking.k8s.io] |
|||
resources: [deployments, endpoints, ingressclasses, nodes, pods/log, replicationcontrollers, secrets] |
|||
verbs: [get, list, watch] |
|||
- apiGroups: [vmware.com] |
|||
resources: [loadbalancers, loadbalancers/status, nsxlbmonitors, nsxlbmonitors/status, virtualnetworkinterfaces, virtualnetworkinterfaces/status, virtualnetworks, virtualnetworks/status] |
|||
verbs: [create, get, list, patch, update, watch, delete] |
|||
- apiGroups: [policy] |
|||
resources: [podsecuritypolicies] |
|||
verbs: [create, get, list, patch, update, watch, delete, use] |
@ -0,0 +1,14 @@ |
|||
kind: ClusterRoleBinding |
|||
apiVersion: rbac.authorization.k8s.io/v1 |
|||
metadata: |
|||
name: nsx-ncp-operator |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
subjects: |
|||
- kind: ServiceAccount |
|||
name: nsx-ncp-operator |
|||
namespace: {{ .Release.Namespace }} |
|||
roleRef: |
|||
kind: ClusterRole |
|||
name: nsx-ncp-operator |
|||
apiGroup: rbac.authorization.k8s.io |
@ -0,0 +1,12 @@ |
|||
{{- if .Values.serviceAccount.create -}} |
|||
apiVersion: v1 |
|||
kind: ServiceAccount |
|||
metadata: |
|||
name: {{ include "nsx-ncp-operator.serviceAccountName" . }} |
|||
labels: |
|||
{{- include "nsx-ncp-operator.labels" . | nindent 4 }} |
|||
{{- with .Values.serviceAccount.annotations }} |
|||
annotations: |
|||
{{- toYaml . | nindent 4 }} |
|||
{{- end }} |
|||
{{- end }} |
@ -0,0 +1,59 @@ |
|||
# Default values for nsx-ncp-operator. |
|||
# This is a YAML-formatted file. |
|||
# Declare variables to be passed into your templates. |
|||
|
|||
image: |
|||
repository: vmware/nsx-container-plugin-operator |
|||
pullPolicy: IfNotPresent |
|||
# Overrides the image tag whose default is the chart appVersion. |
|||
tag: "" |
|||
|
|||
imagePullSecrets: [] |
|||
nameOverride: "" |
|||
fullnameOverride: "" |
|||
|
|||
serviceAccount: |
|||
# Specifies whether a service account should be created |
|||
create: true |
|||
# Annotations to add to the service account |
|||
annotations: {} |
|||
# The name of the service account to use. |
|||
# If not set and create is true, a name is generated using the fullname template |
|||
name: "nsx-ncp-operator" |
|||
|
|||
podAnnotations: {} |
|||
|
|||
podSecurityContext: {} |
|||
# fsGroup: 2000 |
|||
|
|||
securityContext: {} |
|||
# capabilities: |
|||
# drop: |
|||
# - ALL |
|||
# readOnlyRootFilesystem: true |
|||
# runAsNonRoot: true |
|||
# runAsUser: 1000 |
|||
|
|||
resources: {} |
|||
# We usually recommend not to specify default resources and to leave this as a conscious |
|||
# choice for the user. This also increases chances charts run on environments with little |
|||
# resources, such as Minikube. If you do want to specify resources, uncomment the following |
|||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'. |
|||
# limits: |
|||
# cpu: 100m |
|||
# memory: 128Mi |
|||
# requests: |
|||
# cpu: 100m |
|||
# memory: 128Mi |
|||
|
|||
nodeSelector: {} |
|||
|
|||
tolerations: |
|||
- effect: NoSchedule |
|||
key: node-role.kubernetes.io/master |
|||
- effect: NoSchedule |
|||
key: node.kubernetes.io/not-ready |
|||
- effect: NoSchedule |
|||
key: node.kubernetes.io/network-unavailable |
|||
|
|||
affinity: {} |
Loading…
Reference in new issue