Public Release

160
examples/config.yaml
Normal file

@@ -0,0 +1,160 @@
# This is the nebula example configuration file. You must edit, at a minimum, the static_host_map, lighthouse, and firewall sections


# PKI defines the location of credentials for this node. Each of these can also be inlined by using the yaml ": |" syntax.
pki:
  ca: /etc/nebula/ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key
  # blacklist is a list of certificate fingerprints that we will refuse to talk to
  #blacklist:
  #  - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
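  # For reference (not part of this example), credentials like the ones above
  # can be generated with the nebula-cert binary that ships with nebula; a
  # sketch, with illustrative CA/host names and range:
  #   nebula-cert ca -name "Myorganization, Inc"
  #   nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"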

# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
# The syntax is:
#   "{nebula ip}": ["{routable ip/dns name}:{routable port}"]
# Example, if your lighthouse has the nebula IP of 192.168.100.1 and has the real ip address of 100.64.22.11 and runs on port 4242:
static_host_map:
  "192.168.100.1": ["100.64.22.11:4242"]


lighthouse:
  # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
  # you have configured to be lighthouses in your network
  am_lighthouse: false
  # serve_dns optionally starts a dns listener that responds to various queries and can even be
  # delegated to for resolution
  #serve_dns: false
  # interval is the number of seconds between updates from this node to a lighthouse.
  # during updates, a node sends information about its current IP addresses to each lighthouse.
  interval: 60
  # hosts is a list of lighthouse hosts this node should report to and query from
  # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
  hosts:
    - "192.168.100.1"

# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined;
# however, using port 0 will dynamically assign a port and is recommended for roaming nodes.
listen:
  host: 0.0.0.0
  port: 4242
  # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
  # default is 64, does not support reload
  #batch: 64
  # Configure socket buffers for the udp side (outside), leave unset to use the system defaults. Values will be doubled by the kernel
  # Default is net.core.rmem_default and net.core.wmem_default (/proc/sys/net/core/rmem_default and /proc/sys/net/core/wmem_default)
  # Maximum is limited by memory in the system, SO_RCVBUFFORCE and SO_SNDBUFFORCE are used to avoid having to raise the system wide
  # max, net.core.rmem_max and net.core.wmem_max
  #read_buffer: 10485760
  #write_buffer: 10485760
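  # To see the defaults your kernel currently applies (a quick check, not a
  # nebula setting):
  #   cat /proc/sys/net/core/rmem_default /proc/sys/net/core/wmem_default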


# Local range is used to define a hint about the local network range, which speeds up discovering the fastest
# path to a network adjacent nebula node.
#local_range: "172.16.0.0/24"

# Handshake mac is an optional network-wide handshake authentication step that is used to prevent nebula from
# responding to handshakes from nodes not in possession of the shared secret. This is primarily used to prevent
# detection of nebula nodes when someone is scanning a network.
#handshake_mac:
  #key: "DONOTUSETHISKEY"
  # You can define multiple accepted keys
  #accepted_keys:
  #- "DONOTUSETHISKEY"
  #- "dontusethiseither"

# sshd can expose informational and administrative functions via ssh.
#sshd:
  # Toggles the feature
  #enabled: true
  # Host and port to listen on, port 22 is not allowed for your safety
  #listen: 127.0.0.1:2222
  # A file containing the ssh host private key to use
  # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
  #host_key: ./ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  #authorized_users:
  #- user: steeeeve
    # keys can be an array of strings or single string
    #keys:
    #- "ssh public key string"
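  # Once enabled, the administrative shell can be reached from the node itself;
  # a sketch, assuming the listen address above and an authorized user named steeeeve:
  #   ssh steeeeve@127.0.0.1 -p 2222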

# Configure the private interface. Note: addr is baked into the nebula certificate
tun:
  # Name of the device
  dev: nebula1
  # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
  drop_local_broadcast: false
  # Toggles forwarding of multicast packets
  drop_multicast: false
  # Sets the transmit queue length, if you notice lots of transmit drops on the tun it may help to raise this number. Default is 500
  tx_queue: 500
  # Default MTU for every packet, safe setting is (and the default) 1300 for internet based traffic
  mtu: 1300
  # Route based MTU overrides: if you have known vpn ip paths that can support larger MTUs you can increase/decrease them here
  routes:
    #- mtu: 8800
    #  route: 10.0.0.0/16

# TODO
# Configure logging level
logging:
  # panic, fatal, error, warning, info, or debug. Default is info
  level: info
  # json or text formats currently available. Default is text
  format: text

#stats:
  #type: graphite
  #prefix: nebula
  #protocol: tcp
  #host: 127.0.0.1:9999
  #interval: 10s

  #type: prometheus
  #listen: 127.0.0.1:8080
  #path: /metrics
  #namespace: prometheusns
  #subsystem: nebula
  #interval: 10s

# Nebula security group configuration
firewall:
  conntrack:
    tcp_timeout: 120h
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  # The firewall is default deny. There is no way to write a deny rule.
  # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
  # Logical evaluation is roughly: port AND proto AND ca_sha AND ca_name AND (host OR group OR groups OR cidr)
  # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
  #   code: same as port but makes more sense when talking about ICMP. TODO: this is not currently implemented in a way that works, use `any`
  #   proto: `any`, `tcp`, `udp`, or `icmp`
  #   host: `any` or a literal hostname, ie `test-host`
  #   group: `any` or a literal group name, ie `default-group`
  #   groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
  #   cidr: a CIDR, `0.0.0.0/0` is any.
  #   ca_name: An issuing CA name
  #   ca_sha: An issuing CA shasum

  outbound:
    # Allow all outbound traffic from this node
    - port: any
      proto: any
      host: any

  inbound:
    # Allow icmp between any nebula hosts
    - port: any
      proto: icmp
      host: any

    # Allow tcp/443 from any host with BOTH laptop and home group
    - port: 443
      proto: tcp
      groups:
        - laptop
        - home
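    # A commented illustration (not part of the working example) of how the
    # remaining fields compose: allow a tcp port range from one group, but only
    # from a given CIDR and only for certificates signed by a named CA.
    #- port: 200-901
    #  proto: tcp
    #  group: default-group
    #  cidr: 192.168.100.0/24
    #  ca_name: "Myorganization, Inc"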

154
examples/quickstart-vagrant/README.md
Normal file

@@ -0,0 +1,154 @@
# Quickstart Guide

This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.

## Pre-requisites

There are two pre-requisites prior to bringing up the vagrant environment:

- build the binaries locally for the vagrant deploy
- create a virtualenv for ansible

### Building the binaries

Build the `nebula` and `nebula-cert` binaries for vagrant by running the following from the src directory (the one containing the Makefile):

`make bin-vagrant`

For convenience, ansible will run this for you in every deploy (see `ansible/playbook.yml`).

### Creating the virtualenv

Within the `quickstart-vagrant/` directory, do the following:

```
# make a virtual environment
virtualenv venv

# get into the virtualenv
source venv/bin/activate

# install ansible
pip install -r requirements.yml
```

## Bringing up the vagrant environment

The vagrant environment relies on the `vagrant-hostmanager` plugin.

To install it, run:

```
vagrant plugin install vagrant-hostmanager
```

All hosts within the Vagrantfile are brought up with:

`vagrant up`

Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running:

`ansible-playbook playbook.yml -i inventory -u vagrant`
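If you only need to redo part of a later deploy, the `build-nebula` and `nebula-conf` tags defined in the playbook and role can be combined with ansible's standard `--tags` flag; a sketch:

```
# redeploy only the nebula configuration
ansible-playbook playbook.yml -i inventory -u vagrant --tags nebula-conf

# rebuild only the binaries
ansible-playbook playbook.yml -i inventory -u vagrant --tags build-nebula
```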

## Testing within the vagrant env

Once the ansible run is done, hop onto a vagrant box:

`vagrant ssh generic1.vagrant`

or specifically:

`ssh vagrant@<ip-address-in-vagrant-file>` (password for the vagrant user on the boxes is `vagrant`)

Once the vagrant boxes are up, a quick test is to ping from `generic1.vagrant` to `generic2.vagrant` using their respective nebula ip addresses.

```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```

You can further verify that the allowed nebula firewall rules work by ssh'ing from one generic box to the other.

`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)

See `/etc/nebula/config.yml` on a box for the firewall rules.

To see full handshakes and hostmaps, change the logging level in `/etc/nebula/config.yml` on the vagrant boxes from info to debug.

You can watch nebula logs by running:

```
sudo journalctl -fu nebula
```

Refer to the README in the nebula src directory for further instructions on configuring nebula.

## Troubleshooting

### Is nebula up and running?

Run `ifconfig` and verify that an interface named `nebula1` is up:

```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1300
        inet 10.168.91.210  netmask 255.128.0.0  destination 10.168.91.210
        inet6 fe80::aeaf:b105:e6dc:936c  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11  bytes 600 (600.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
```

### Connectivity

Are you able to ping other boxes on the private nebula network?

These are the private nebula ip addresses of the vagrant env:

```
generic1.vagrant    [nebula_ip] 10.168.91.210
generic2.vagrant    [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```

Try pinging `generic1.vagrant` from any other box (and vice versa) using its nebula ip above.

If a specific port is involved, double check the nebula firewall rules under `/etc/nebula/config.yml` to make sure that connectivity is allowed for your use-case.

```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
    - proto: any
      port: 53
      host: any

  outbound:
    - proto: any
      port: any
      host: any
```
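If pings still fail, it can help to confirm that nebula's UDP traffic is reaching the lighthouse at all. A sketch using tcpdump (the capture interface may differ in your setup):

```
# on lighthouse1.vagrant: watch for inbound nebula traffic on udp/4242
sudo tcpdump -ni any udp port 4242
```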

40
examples/quickstart-vagrant/Vagrantfile
vendored
Normal file

@@ -0,0 +1,40 @@
Vagrant.require_version ">= 2.2.6"

nodes = [
  { :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]

Vagrant.configure("2") do |config|

  config.ssh.insert_key = false

  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.enable :apt
  else
    printf("** Install the vagrant-cachier plugin to speed up deploys: `vagrant plugin install vagrant-cachier` **\n")
  end

  if Vagrant.has_plugin?('vagrant-hostmanager')
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
    config.hostmanager.include_offline = true
  else
    config.vagrant.plugins = "vagrant-hostmanager"
  end

  nodes.each do |node|
    config.vm.define node[:hostname] do |node_config|
      node_config.vm.box = node[:box]
      node_config.vm.hostname = node[:hostname]
      node_config.vm.network :private_network, ip: node[:ip]
      node_config.vm.provider :virtualbox do |vb|
        vb.memory = node[:ram]
        vb.cpus = node[:cpus]
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
      end
    end
  end
end

4
examples/quickstart-vagrant/ansible/ansible.cfg
Normal file

@@ -0,0 +1,4 @@
[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes

@@ -0,0 +1,21 @@
#!/usr/bin/python


class FilterModule(object):
    def filters(self):
        return {
            'to_nebula_ip': self.to_nebula_ip,
            'map_to_nebula_ips': self.map_to_nebula_ips,
        }

    def to_nebula_ip(self, ip_str):
        # list() keeps the result subscriptable under python 3, where map() returns an iterator
        ip_list = list(map(int, ip_str.split(".")))
        # rewrite the first two octets, e.g. 172.11.91.210 becomes 10.168.91.210
        ip_list[0] = 10
        ip_list[1] = 168
        ip = '.'.join(map(str, ip_list))
        return ip

    def map_to_nebula_ips(self, ip_strs):
        ip_list = [self.to_nebula_ip(ip_str) for ip_str in ip_strs]
        ips = ', '.join(ip_list)
        return ips
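As a quick sanity check of the mapping this filter performs (a hypothetical one-liner, not part of the role), the vagrant private-network address maps to its nebula counterpart like so:

```
# 172.11.91.210 on the vagrant network maps to 10.168.91.210 on nebula
$ echo 172.11.91.210 | awk -F. '{print "10.168." $3 "." $4}'
10.168.91.210
```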

11
examples/quickstart-vagrant/ansible/inventory
Normal file

@@ -0,0 +1,11 @@
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

[generic]
generic1.vagrant
generic2.vagrant

[lighthouse]
lighthouse1.vagrant

20
examples/quickstart-vagrant/ansible/playbook.yml
Normal file

@@ -0,0 +1,20 @@
---
- name: test connection to vagrant boxes
  hosts: all
  tasks:
    - debug: msg=ok

- name: build nebula binaries locally
  connection: local
  hosts: localhost
  tasks:
    - command: chdir=../../../ make bin-vagrant
      tags:
        - build-nebula

- name: install nebula on all vagrant hosts
  hosts: all
  become: yes
  gather_facts: yes
  roles:
    - nebula

@@ -0,0 +1,3 @@
---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"

@@ -0,0 +1,15 @@
[Unit]
Description=nebula
Wants=basic.target
After=basic.target network.target

[Service]
SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target

@@ -0,0 +1,5 @@
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----

@@ -0,0 +1,4 @@
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----

@@ -0,0 +1,5 @@
---
# handlers file for nebula

- name: restart nebula
  service: name=nebula state=restarted

@@ -0,0 +1,56 @@
---
# tasks file for nebula

- name: get the vagrant network interface and set fact
  set_fact:
    vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
  tags:
    - nebula-conf

- name: install built nebula binary
  copy: src=../../../../../{{ item }} dest=/usr/local/bin mode=0755
  with_items:
    - nebula
    - nebula-cert

- name: create nebula config directory
  file: path="{{ nebula_config_directory }}" state=directory mode=0755

- name: temporarily copy over root.crt and root.key to sign
  copy: src={{ item }} dest=/opt/{{ item }}
  with_items:
    - vagrant-test-ca.key
    - vagrant-test-ca.crt

- name: sign using the root key
  command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key

- name: remove root.key used to sign
  file: dest=/opt/{{ item }} state=absent
  with_items:
    - vagrant-test-ca.key

- name: write the content of the trusted ca certificate
  copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
  notify: restart nebula

- name: Create config directory
  file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory

- name: nebula config
  template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
  notify: restart nebula
  tags:
    - nebula-conf

- name: nebula systemd
  copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
  register: addconf
  notify: restart nebula

- name: maybe reload systemd
  shell: systemctl daemon-reload
  when: addconf.changed

- name: nebula running
  service: name="nebula" state=started enabled=yes

@@ -0,0 +1,84 @@
pki:
  ca: /etc/nebula/vagrant-test-ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key

# Port Nebula will be listening on
listen:
  host: 0.0.0.0
  port: 4242

# sshd can expose informational and administrative functions via ssh
sshd:
  # Toggles the feature
  enabled: true
  # Host and port to listen on
  listen: 127.0.0.1:2222
  # A file containing the ssh host private key to use
  host_key: /etc/ssh/ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  authorized_users:
{% for user in nebula_users %}
    - user: {{ user.name }}
      keys:
{% for key in user.ssh_auth_keys %}
        - "{{ key }}"
{% endfor %}
{% endfor %}

local_range: 10.168.0.0/16

static_host_map:
  # lighthouse
  {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] }}:4242"]

default_route: "0.0.0.0"

lighthouse:
{% if 'lighthouse' in group_names %}
  am_lighthouse: true
  serve_dns: true
{% else %}
  am_lighthouse: false
{% endif %}
  interval: 60
  hosts:
    - {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}

# Configure the private interface
tun:
  dev: nebula1
  # Sets MTU of the tun dev.
  # MTU of the tun must be smaller than the MTU of the eth0 interface
  mtu: 1300

# TODO
# Configure logging level
logging:
  level: info
  format: json

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
{% if 'lighthouse' in group_names %}
    - proto: any
      port: 53
      host: any
{% endif %}

  outbound:
    - proto: any
      port: any
      host: any

@@ -0,0 +1,7 @@
---
# vars file for nebula

nebula_users:
  - name: user1
    ssh_auth_keys:
      - "ed25519 place-your-ssh-public-key-here"

1
examples/quickstart-vagrant/requirements.yml
Normal file

@@ -0,0 +1 @@
ansible

51
examples/service_scripts/nebula.init.d.sh
Normal file

@@ -0,0 +1,51 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides:          nebula
# Required-Start:    $local_fs $network
# Required-Stop:     $local_fs $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Description:       nebula mesh vpn client
### END INIT INFO

SCRIPT="/usr/local/bin/nebula -config /etc/nebula/config.yml"
RUNAS=root

PIDFILE=/var/run/nebula.pid
LOGFILE=/var/log/nebula.log

start() {
  if [ -f "$PIDFILE" ] && kill -0 $(cat "$PIDFILE"); then
    echo 'Service already running' >&2
    return 1
  fi
  echo 'Starting nebula service…' >&2
  # POSIX sh redirection; `&>` is a bashism and this script runs under /bin/sh
  local CMD="$SCRIPT > \"$LOGFILE\" 2>&1 & echo \$!"
  su -c "$CMD" $RUNAS > "$PIDFILE"
  echo 'Service started' >&2
}

stop() {
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo 'Service not running' >&2
    return 1
  fi
  echo 'Stopping nebula service…' >&2
  kill -15 $(cat "$PIDFILE") && rm -f "$PIDFILE"
  echo 'Service stopped' >&2
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart)
    stop
    start
    ;;
  *)
    echo "Usage: $0 {start|stop|restart}"
esac

15
examples/service_scripts/nebula.service
Normal file

@@ -0,0 +1,15 @@
[Unit]
Description=nebula
Wants=basic.target
After=basic.target network.target

[Service]
SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
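A sketch of activating the unit once it has been copied into place (assuming /etc/systemd/system/nebula.service, as the ansible role above installs it):

```
sudo systemctl daemon-reload
sudo systemctl enable --now nebula
sudo journalctl -fu nebula
```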