Basic Debian Jessie Security Hardening

Posted on: 2017-04-24

Make sure you are subscribed to all of the official repos, then apply updates and install some software.

vim /etc/apt/sources.list
deb http://ftp.debian.org/debian jessie main contrib non-free
deb http://ftp.debian.org/debian jessie-updates main contrib non-free
deb http://security.debian.org jessie/updates main contrib non-free

apt-get -y update
apt-get -y upgrade

apt-get -y install vim screen lsof telnet dnsutils openssl netcat-openbsd fail2ban unattended-upgrades netfilter-persistent iptables-persistent rsyslog sysv-rc-conf anacron rsync less

Generate and set a good root password

openssl rand -base64 24
#copy
passwd
#paste

Configure automatic updates

vim /etc/apt/apt.conf.d/50unattended-upgrades
#only the following should be uncommented
  "o=Debian,n=jessie";
  "o=Debian,n=jessie-updates";
  "o=Debian,n=jessie,l=Debian-Security";

dpkg-reconfigure -plow unattended-upgrades
y

vim /etc/apt/apt.conf.d/20auto-upgrades
#should look like
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";

systemctl enable unattended-upgrades.service

#trying it
dpkg -l vim 
#note version number
apt-get remove vim-*
apt-get install vim=2:7.4.488-7+deb8u1 vim-common=2:7.4.488-7+deb8u1 vim-runtime=2:7.4.488-7+deb8u1
dpkg -l vim 
#note version number
unattended-upgrade --debug --dry-run
#should see notes about vim upgrades
#wait a day or so and then check again with dpkg -l vim
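
You can also confirm what unattended-upgrades did from the logs (default log locations, assuming nothing was changed):

grep vim /var/log/unattended-upgrades/unattended-upgrades.log
grep "upgrade vim" /var/log/dpkg.log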

Locking down the firewall

vi /etc/iptables/rules.v4
#make the filter section look like this. Open other ports the same way we do for 22 for any other services you plan to offer (see the example after the rules).
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-port-unreachable
COMMIT
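
Extra ACCEPT lines for other services go above the final REJECT rule, one per port. For example, a hypothetical web server would also need:

-A INPUT -p tcp -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 443 -j ACCEPT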

systemctl enable netfilter-persistent
systemctl restart netfilter-persistent

#check it
iptables --list --verbose --numeric

Protecting against brute-force SSH attacks

cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
vim /etc/fail2ban/jail.local
#needed for some crappy VPSes
banaction = iptables
#ensure
[ssh]
enabled  = true
port     = ssh
filter   = sshd
logpath  = /var/log/auth.log
maxretry = 6

systemctl enable fail2ban
systemctl restart fail2ban

#check it
iptables --list --verbose --numeric
#try to log in 10 or so times from a box other than the one you are connecting through right now, or just wait and soon enough some 'hacker' will be blocked.
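
You can also ask fail2ban directly which jails are running and who is currently banned (jail name ssh from the config above):

fail2ban-client status
fail2ban-client status ssh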

Turning off things that you aren't using

systemctl list-unit-files --type=service | grep enabled
anacron-resume.service                 enabled 
anacron.service                        enabled 
cron.service                           enabled 
getty@.service                         enabled 
hwclock-save.service                   enabled 
netfilter-persistent.service           enabled 
quota.service                          enabled 
rsyslog.service                        enabled 
ssh.service                            enabled 
sshd.service                           enabled 
syslog.service                         enabled 
systemd-networkd.service               enabled 
systemd-readahead-collect.service      enabled 
systemd-readahead-drop.service         enabled 
systemd-readahead-replay.service       enabled 
systemd-resolved.service               enabled 
systemd-timesyncd.service              enabled 
unattended-upgrades.service            enabled 
vzfifo.service                         enabled

The above is what a tweaked system looks like for me. You'll want to run systemctl disable <service> for anything you don't want to have running.
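
For example, if something like exim4 showed up in your list and you aren't using it (exim4 here is just a hypothetical example; pick whatever you actually don't need):

systemctl stop exim4.service
systemctl disable exim4.service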

sysv-rc-conf

#disable things you aren't going to use by unchecking them in runlevels 2-5. A tweaked system for me runs:
bootlogs
fail2ban
motd
rc.local
rmnologin
rsyslog
ssh
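
sysv-rc-conf can also be driven non-interactively; for example, to turn a hypothetical unused service off in runlevels 2-5:

sysv-rc-conf --level 2345 exim4 off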

Now reboot and make sure things look right

reboot
#almost nothing should be running
ps -ef 
root         1     0  0 12:38 ?        00:00:00 init -z       
root         2     1  0 12:38 ?        00:00:00 [kthreadd/10638]
root         3     2  0 12:38 ?        00:00:00 [khelper/10638]
systemd+    51     1  0 12:38 ?        00:00:00 /lib/systemd/systemd-networkd
root        52     1  0 12:38 ?        00:00:00 /lib/systemd/systemd-udevd
root        86     1  0 12:38 ?        00:00:00 /lib/systemd/systemd-journald
root       189     1  0 12:38 ?        00:00:00 /usr/sbin/sshd -D
systemd+   190     1  0 12:38 ?        00:00:00 /lib/systemd/systemd-resolved
root       191     1  0 12:38 ?        00:00:00 /usr/sbin/rsyslogd -n
root       208     1  0 12:38 tty2     00:00:00 /sbin/agetty --noclear tty2 linux
root       209     1  0 12:38 tty1     00:00:00 /sbin/agetty --noclear --keep-baud console 115200 38400 9600 vt102
root       217     1  0 12:38 ?        00:00:00 /usr/bin/python /usr/bin/fail2ban-server -b -s /var/run/fail2ban/fail2ban.sock -p /var/run/fail2ban/fail2ban.pid
root       283   189  0 13:48 ?        00:00:00 sshd: root@pts/0    
root       285   283  0 13:48 pts/0    00:00:00 -bash

#should be very few ports listening; something like this should show only sshd
lsof -nP -iTCP -sTCP:LISTEN
COMMAND PID USER   FD   TYPE    DEVICE SIZE/OFF NODE NAME
sshd    189 root    3u  IPv4 149984794      0t0  TCP *:22 (LISTEN)
sshd    189 root    4u  IPv6 149984796      0t0  TCP *:22 (LISTEN)

#firewall should be locked down
 iptables --list --verbose --numeric 
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination         
  230 19105 fail2ban-ssh  tcp  --  *      *       0.0.0.0/0            0.0.0.0/0            tcp dpt:22
  233 19779 ACCEPT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            state RELATED,ESTABLISHED
    0     0 ACCEPT     icmp --  *      *       0.0.0.0/0            0.0.0.0/0           
    0     0 ACCEPT     all  --  lo     *       0.0.0.0/0            0.0.0.0/0           
    7   376 ACCEPT     tcp  --  *      *       0.0.0.0/0            0.0.0.0/0            tcp dpt:22
   84  5155 REJECT     all  --  *      *       0.0.0.0/0            0.0.0.0/0            reject-with icmp-port-unreachable

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination

Chain OUTPUT (policy ACCEPT 166 packets, 18623 bytes)
 pkts bytes target     prot opt in     out     source               destination

Chain fail2ban-ssh (1 references)
 pkts bytes target     prot opt in     out     source               destination         
  230 19105 RETURN     all  --  *      *       0.0.0.0/0            0.0.0.0/0

Single box OpenNebula setup

Posted on: 2017-04-24

Every once in a while I rebuild my home lab. This time I decided to choose something other than oVirt for my virtualization needs. I went with OpenNebula and I've got to say I'm impressed. It is fast, has a good install routine, good docs, and a good UI, and it just generally doesn't suck.

This post will set up a single server to run all components of an OpenNebula install. VMs hosted by it will be exposed on your normal home network with no extra security added.

Start with a fresh minimal CentOS 7 install on some hardware that supports virtualization, pick out some IPs in your home network range that you want to devote to your lab VMs, and then do the following.

Create your network bridge and open up a firewall hole. Substitute your network info and devices.

yum -y install bridge-utils

vim /etc/hosts
192.168.1.105 lab.lan

vi /etc/sysconfig/network-scripts/ifcfg-lan0
DEVICE="lan0"
BOOTPROTO="static"
IPADDR="192.168.1.105"
NETMASK="255.255.255.0"
GATEWAY="192.168.1.1"
DNS1=192.168.1.1
ONBOOT="yes"
TYPE="Bridge"
NM_CONTROLLED="no"

vi /etc/sysconfig/network-scripts/ifcfg-eno1
DEVICE=eno1
TYPE=Ethernet
BOOTPROTO=none
ONBOOT=yes
NM_CONTROLLED=no
BRIDGE=lan0

firewall-cmd --zone=public --add-rich-rule='rule family="ipv4" source address="192.168.0.0/16" accept' --permanent

firewall-cmd --reload
systemctl restart network
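
A quick sanity check that the bridge came up with the right address (device names from above):

brctl show lan0
ip addr show lan0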

Do some system config. Do updates. Add repos. Install software.

vi /etc/selinux/config
SELINUX=disabled
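
That config change only takes effect at boot; we reboot below anyway, but if you want SELinux in permissive mode for the current session right away:

setenforce 0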

cat << EOT > /etc/yum.repos.d/opennebula.repo
[opennebula]
name=opennebula
baseurl=http://downloads.opennebula.org/repo/5.2/CentOS/7/x86_64
enabled=1
gpgcheck=0
EOT

yum -y install epel-release

yum -y update
reboot

yum -y install opennebula-server opennebula-sunstone opennebula-ruby opennebula-gate opennebula-flow opennebula opennebula-node-kvm opennebula-common nmap-ncat vim lsof screen net-tools telnet rsync

/usr/share/one/install_gems
1. CentOS/RedHat/Scientific
 Press enter to continue...

Set the oneadmin password. Start the services. Test the GUI.

su - oneadmin
echo "oneadmin:test1234" > ~/.one/one_auth

ssh lab.lan #should get in without a password
Are you sure you want to continue connecting (yes/no)? yes

exit
exit

systemctl enable opennebula
systemctl start opennebula
systemctl enable opennebula-sunstone
systemctl start opennebula-sunstone
systemctl enable libvirtd
systemctl restart libvirtd

http://IP:9869 oneadmin/test1234
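
You can also verify the daemon from the command line as the oneadmin user:

su - oneadmin -c 'oneuser list'
su - oneadmin -c 'onehost list'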

Configure networking, add a host and a user.

Network > Virtual Networks  > + 
General> 
    Name: lan0
Conf >
    Bridge: lan0
    mode:  Bridge
Addresses > IPv4 #put your info
    First: 192.168.1.200
    Size: 50
Context #use your info
    Network address: 192.168.1.0
    Gateway: 192.168.1.1
    DNS: 192.168.1.1
    Network mask: 255.255.255.0
    MTU: 1400
Create

Infrastructure > Hosts > +
    hostname: lab.lan 
Create
Refresh until Status = ON

System > Users > +
    username: dminnich
    password, confirm: test1234
Create

System > Groups > users > update
    User view > Group Users
Update
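
If you prefer the command line, the host and user steps can also be done as oneadmin (a rough equivalent of the GUI steps above; I find the virtual network easier to create in Sunstone):

su - oneadmin
onehost create lab.lan --im kvm --vm kvm
oneuser create dminnich test1234
oneuser chgrp dminnich users
exit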

Login as your new user and create a VM

oneadmin > sign out
dminnich / test1234

dminnich > settings > add SSH key 
Paste in your SSH public key from your client machine; see below if you need to generate one.
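
On your client machine, if you need to generate a key (standard OpenSSH commands):

ssh-keygen -t rsa -b 4096
cat ~/.ssh/id_rsa.pub
#paste the output into the SSH key box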

dminnich > views > user
    Storage > Apps > Check Debian 8 KVM
    OpenNebula
    Select default datastore
    Download

dminnich > views > cloud
    VMs > + > Debian 8 
    Network > lan0
    Create

Testing

Wait for the VM status to go green.
ssh root@<VM IP> from the box that has your SSH key.

References: http://docs.opennebula.org/5.2/deployment/index.html


Playing with OpenShift 3

Posted on: 2017-04-15

OpenShift is the PaaS solution offered by Red Hat. It uses Docker containers and Kubernetes to orchestrate them. It also comes with nice CI/CD features. You can check some code into a repo and then watch it get built, saved as a Docker image, and spawned in an environment in an HA way via a rolling deployment that retires the old version and brings the new one up with no downtime.

Stand up a minimal CentOS 7 VM. You probably want at least 2 cores, 4GB of RAM and 20GB of space.

yum -y update
yum -y install docker vim wget git
vim /etc/sysconfig/docker
    INSECURE_REGISTRY='--insecure-registry 172.30.0.0/16'
systemctl start docker
systemctl enable docker
wget https://github.com/openshift/origin/releases/download/v1.4.1/openshift-origin-client-tools-v1.4.1-3f9807a-linux-64bit.tar.gz
tar -xzf openshift-origin-client-tools-v1.4.1-3f9807a-linux-64bit.tar.gz --strip=1 -C /usr/local/bin/ "*oc"
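#quick check that the oc client landed on your PATH
oc version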
mkdir /opt/openshift_data/
[root@openshift ~]# oc cluster up --host-data-dir=/opt/openshift_data/
-- Checking OpenShift client ... OK
-- Checking Docker client ... OK
-- Checking Docker version ... OK
-- Checking for existing OpenShift container ... OK
-- Checking for openshift/origin:v1.4.1 image ... 
Pulling image openshift/origin:v1.4.1
Pulled 0/3 layers, 3% complete
Pulled 1/3 layers, 33% complete
Pulled 2/3 layers, 99% complete
Pulled 3/3 layers, 100% complete
Extracting
Image pull complete
-- Checking Docker daemon configuration ... OK
-- Checking for available ports ... OK
-- Checking type of volume mount ... 
Using nsenter mounter for OpenShift volumes
-- Creating host directories ... OK
-- Finding server IP ... 
Using 192.168.1.68 as the server IP
-- Starting OpenShift container ... 
Creating initial OpenShift configuration
Starting OpenShift using container 'origin'
Waiting for API server to start listening
OpenShift server started
-- Adding default OAuthClient redirect URIs ... OK
-- Installing registry ... OK
-- Installing router ... OK
-- Importing image streams ... OK
-- Importing templates ... OK
-- Login to server ... OK
-- Creating initial project "myproject" ... OK
-- Removing temporary directory ... OK
-- Server Information ... 
OpenShift server started.
The server is accessible via web console at:
   https://192.168.1.68:8443

You are logged in as:
   User:     developer
   Password: developer

To login as administrator:
   oc login -u system:admin

Explore the GUI

https://192.168.1.68:8443/
developer / developer

Let's create some source code, then use OpenShift to build it, save it as an image, deploy the image, and expose it as a route that we can access.

oc whoami
developer
oc new-project testphp
mkdir testphp
cd testphp
git init
vim index.php
<?php
echo "hi everybody";
phpinfo();
?>
git add .
git commit -m '1'

[root@openshift testphp]# oc new-app --name=testphp .
--> Found image 4da2ce1 (4 days old) in image stream "openshift/php" under tag "5.6" for "php"

Apache 2.4 with PHP 5.6 
----------------------- 
Platform for building and running PHP 5.6 applications

Tags: builder, php, php56, rh-php56

* The source repository appears to match: php
* A source build using binary input will be created
  * The resulting image will be pushed to image stream "testphp:latest"
  * A binary build was created, use 'start-build --from-dir' to trigger a new build
* This image will be deployed in deployment config "testphp"
* Port 8080/tcp will be load balanced by service "testphp"
  * Other containers can access this service through the hostname "testphp"

--> Creating resources ...
imagestream "testphp" created
buildconfig "testphp" created
deploymentconfig "testphp" created
service "testphp" created
--> Success
Use 'oc start-build testphp' to start a build.
Run 'oc status' to view your app.

[root@openshift testphp]# oc start-build testphp --from-dir=.
Uploading directory "." as binary input for the build ...
build "testphp-1" started

[root@openshift testphp]# oc logs -f build/testphp-1
Receiving source from STDIN as archive ...
---> Installing application source...
Pushing image 172.30.147.41:5000/testphp/testphp:latest ...
Pushed 0/9 layers, 2% complete
Pushed 1/9 layers, 11% complete
Pushed 2/9 layers, 24% complete
Pushed 3/9 layers, 35% complete
Pushed 4/9 layers, 46% complete
Pushed 5/9 layers, 57% complete
Pushed 6/9 layers, 69% complete
Pushed 7/9 layers, 85% complete
Pushed 7/9 layers, 100% complete
Pushed 8/9 layers, 100% complete
Pushed 9/9 layers, 100% complete
Push successful

oc logs -f dc/testphp
--> Scaling testphp-1 to 1
--> Waiting up to 10m0s for pods in deployment testphp-1 to become ready
--> Success

[root@openshift testphp]# oc get pods
NAME              READY     STATUS      RESTARTS   AGE
testphp-1-build   0/1       Completed   0          2m
testphp-1-l59j5   1/1       Running     0          23s

[root@openshift testphp]# oc expose service testphp
route "testphp" exposed

[root@openshift testphp]# oc status
In project testphp on server https://192.168.1.68:8443

http://testphp-testphp.192.168.1.68.xip.io to pod port 8080-tcp (svc/testphp)
dc/testphp deploys istag/testphp:latest <- bc/testphp source builds uploaded code on openshift/php:5.6 
deployment #1 deployed about a minute ago - 1 pod

Test it out in a browser and explore the GUI a bit.

http://testphp-testphp.192.168.1.68.xip.io
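
Or from a shell:

curl http://testphp-testphp.192.168.1.68.xip.io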

Alright, let's update our app.

vim index.php
echo "bye everybody";
git add .
git commit -m '2'
oc start-build testphp --from-dir=.
Uploading directory "." as binary input for the build ...
build "testphp-2" started

oc logs -f build/testphp-2
Receiving source from STDIN as archive ...
---> Installing application source...
Pushing image 172.30.147.41:5000/testphp/testphp:latest ...
Pushed 6/9 layers, 67% complete
Pushed 7/9 layers, 78% complete
Pushed 8/9 layers, 89% complete
Pushed 9/9 layers, 100% complete
Push successful

oc logs -f dc/testphp
--> Scaling up testphp-2 from 0 to 1, scaling down testphp-1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling testphp-2 up to 1
Scaling testphp-1 down to 0
--> Success

Refresh the page in your browser to see the bye message.

When configured correctly, all of those rebuild steps happen automatically based on code check-ins.
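
For example, with the source in a real git remote (instead of the binary builds used here), a webhook trigger on the buildconfig can kick off a build on every push; you can see the webhook URLs OpenShift generated for this app with:

oc describe bc/testphp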


