Author Archive

Oct 20

Monitor progress of large dd process

If you want to see how a large copy is progressing, you can send the USR1 signal to the dd process.

Start the copy:

# dd if=a.img of=/dev/libvirt_lvm/a-os

In a new terminal, send USR1 to the process, or use watch to do it periodically:

# watch -n60 'kill -USR1 $(pgrep ^dd)'

dd prints progress lines each time it receives USR1:

6370041+0 records in
6370041+0 records out
3261460992 bytes (3.3 GB) copied, 247.17 s, 13.2 MB/s
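
Note: newer dd (coreutils 8.24 and later) can report progress on its own with status=progress, which avoids the signals entirely:

# dd if=a.img of=/dev/libvirt_lvm/a-os status=progress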


Oct 14

DynamoDB Test

Notes on basic AWS DynamoDB usage with Boto3.

http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html

$ cat dynamodbTest.py 
import boto3

#dynamodb = boto3.resource('dynamodb')
# Hard coding credentials is not recommended; use config files or the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
dynamodb = boto3.resource(
    'dynamodb',
    aws_access_key_id='KEY_ID_REMOVED',
    aws_secret_access_key='ACCESS_KEY_REMOVED',
    region_name='us-east-1'
)

def create_table(tableName):
  table = dynamodb.create_table(
    TableName=tableName,
    KeySchema=[
        {
            'AttributeName': 'username', 
            'KeyType': 'HASH'
        },
        {
            'AttributeName': 'last_name', 
            'KeyType': 'RANGE'
        }
    ], 
    AttributeDefinitions=[
        {
            'AttributeName': 'username', 
            'AttributeType': 'S'
        }, 
        {
            'AttributeName': 'last_name', 
            'AttributeType': 'S'
        }, 
    ], 
    ProvisionedThroughput={
        'ReadCapacityUnits': 1, 
        'WriteCapacityUnits': 1
    }
  )

  table.meta.client.get_waiter('table_exists').wait(TableName=tableName)
  print('Table item count: {}'.format(table.item_count))

def delete_table(tableName):
  table = dynamodb.Table(tableName)
  table.delete()

def put_item(tableName):
  table = dynamodb.Table(tableName)

  response = table.put_item(
   Item={
        'username': 'jdoe',
        'first_name': 'jane',
        'last_name': 'doe',
        'age': 20,
        'account_type': 'librarian',
    }
  )

  print(response)

def get_item(tableName):
  table = dynamodb.Table(tableName)

  response = table.get_item(
   Key={
        'username': 'jdoe',
        'last_name': 'doe'
    }
  )

  item = response['Item']
  name = item['first_name']

  print(item)
  print("Hello, {}" .format(name))

def update_item(tableName):
  table = dynamodb.Table(tableName)

  table.update_item(
    Key={
        'username': 'jdoe',
        'last_name': 'doe'
    },
    UpdateExpression='SET age = :val1',
    ExpressionAttributeValues={
        ':val1': 23
    }
  )

def delete_item(tableName):
  table = dynamodb.Table(tableName)

  table.delete_item(
    Key={
        'username': 'jdoe',
        'last_name': 'doe'
    }
  )

def batch_write(tableName):
  table = dynamodb.Table(tableName)

  with table.batch_writer() as batch:
    batch.put_item(
        Item={
            'account_type': 'end_user',
            'username': 'bbob',
            'first_name': 'billy',
            'last_name': 'bob',
            'age': 20,
            'address': {
                'road': '1 fake street',
                'city': 'Houston',
                'state': 'TX',
                'country': 'USA'
            }
        }
    )
    batch.put_item(
        Item={
            'account_type': 'librarian',
            'username': 'user1',
            'first_name': 'user1 first name',
            'last_name': 'user1 last name',
            'age': 20,
            'address': {
                'road': '10 fake street',
                'city': 'Dallas',
                'state': 'TX',
                'country': 'USA'
            }
        }
    )
    batch.put_item(
        Item={
            'account_type': 'end_user',
            'username': 'user2',
            'first_name': 'user2 first name',
            'last_name': 'user2 last name',
            'age': 23,
            'address': {
                'road': '12 fake street',
                'city': 'Austin',
                'state': 'TX',
                'country': 'USA'
            }
        }
    )

def create_multiple_items(tableName,itemCount):

  table = dynamodb.Table(tableName)

  with table.batch_writer() as batch:
    for i in range(itemCount):
        batch.put_item(
            Item={
                'account_type': 'anonymous',
                'username': 'user-' + str(i),
                'first_name': 'unknown',
                'last_name': 'unknown'
            }
        )


def query(tableName):
  from boto3.dynamodb.conditions import Key, Attr
  table = dynamodb.Table(tableName)

  response = table.query(
    KeyConditionExpression=Key('username').eq('user2')
  )

  items = response['Items']
  print(items)

def scan(tableName):
  from boto3.dynamodb.conditions import Key, Attr

  table = dynamodb.Table(tableName)

  response = table.scan(
    FilterExpression=Attr('age').gt(23)
  )

  items = response['Items']
  print(items)

  # print just the usernames from the matching items
  for item in items:
    print(item['username'])

def query_filter(tableName):
  from boto3.dynamodb.conditions import Key, Attr

  table = dynamodb.Table(tableName)

  response = table.scan(
    FilterExpression=Attr('first_name').begins_with('r') & Attr('account_type').eq('librarian')
  )

  items = response['Items']
  print(items)


# Comment/uncomment below to play with the different functions
#create_table('staff')

#put_item('staff')
#get_item('staff')
#update_item('staff')
#delete_item('staff')

#batch_write('staff')

#create_multiple_items('staff', 100)

#query('staff')
#scan('staff')
#query_filter('staff')

#delete_table('staff')
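
Rather than hard coding credentials, boto3 also picks them up from the standard environment variables (or from ~/.aws/credentials), for example:

$ export AWS_ACCESS_KEY_ID=<key id>
$ export AWS_SECRET_ACCESS_KEY=<secret key>
$ export AWS_DEFAULT_REGION=us-east-1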


Oct 14

Linux MSSQL Client

Quick note on connecting to an MSSQL database from Linux using tsql from FreeTDS. FreeTDS is a set of libraries for Unix and Linux that allows your programs to natively talk to Microsoft SQL Server and Sybase databases.
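
The -S server name is normally resolved through an entry in freetds.conf. A minimal sketch (the host value here is a placeholder):

$ cat /etc/freetds/freetds.conf
[DEVSQL1]
        host = devsql1.example.com
        port = 1433
        tds version = 7.3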

$ tsql -S DEVSQL1 -U <user> -P <password>
locale is "en_US.UTF-8"
locale charset is "UTF-8"
using default charset "UTF-8"
1> select @@version
2> go

Microsoft SQL Server 2016 (RTM-CU1) (KB3164674) - 13.0.2149.0 (X64) 
	Jul 11 2016 22:05:22 
	Copyright (c) Microsoft Corporation
	Developer Edition (64-bit) on Windows Server 2012 R2 Standard 6.3 <X64> (Build 9600: ) (Hypervisor)

(1 row affected)

1> use DEVDB1
2> go
1> SELECT * FROM INFORMATION_SCHEMA.TABLES 
2> go
TABLE_CATALOG	TABLE_SCHEMA	TABLE_NAME	TABLE_TYPE
DEVDB1	        TABLE_1  	JPS_DN	        BASE TABLE

Ref: https://tryolabs.com/blog/2012/06/25/connecting-sql-server-database-python-under-ubuntu/


Sep 29

SSH JumpHost

Newer versions of ssh simplify the ProxyCommand setup in config files with the ProxyJump directive.

A command line ProxyCommand may look like this:

$ ssh -i private_key -o "ProxyCommand ssh -W %h:%p -i private_key user@<jumphost IP address>" user@<private IP address>

Config file entries look like this:

$ more config 
Host gw01
     HostName <jumphost IP>
     User <username>
     IdentityFile /full/path/private_key
Host server1
     HostName 10.2.3.3
     ProxyJump gw01
     User <username>
     IdentityFile /full/path/private_key

With the config file in place you can simply ssh like this:

$ ssh server1

Or, better if you have many projects, use ssh with a custom config file:

$ ssh -F my-config server1

Note: I was not able to use ProxyJump as a command line one-liner with the -J flag when private keys are required on both the jumphost and the private host. The example below does not specify -i for the jumphost, and I am not sure it can be given on the command line.

$ ssh -i key -J user@public-IP user@private-IP
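
One workaround that may help, though I have not verified it here, is loading both keys into ssh-agent so each hop can pick the key it needs:

$ eval $(ssh-agent)
$ ssh-add private_key jumphost_key
$ ssh -J user@public-IP user@private-IP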

A couple of examples of scp using the config file and/or jumping:

rrosso@rrosso-VirtualBox:~/.ssh$ scp -F my-config -oProxyJump=gw01 /media/antergos-17.6-x86_64.iso  host01:/pool/

rrosso@rrosso-VirtualBox:~/.ssh$ scp -F my-config  /media/antergos-17.6-x86_64.iso  gw01:


Sep 25

Compute Instance in OCI using terraform

Begin Update 9/26/17.

It is possible to reference the subnet as follows:

subnet_id = "${oci_core_subnet.PrivSubnetAD1.id}"

My original problem, and the reason for the workaround, was that I am using modules. I would prefer modules so I can organize and subdivide better, but they caused the reference above not to work, since a resource in one module is not directly visible from another. Plus, subdividing the work may cause more issues around losing access to variables/references.
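
For reference, the usual way to cross a module boundary is to export the subnet ocid as an output of the networking module and reference it through the module. A sketch, assuming the subnet is created in a module named networking (the names here are hypothetical):

# networking/outputs.tf
output "PrivSubnetAD1_id" {
  value = "${oci_core_subnet.PrivSubnetAD1.id}"
}

# root configuration
module "networking" {
  source = "./networking"
}

# then inside the compute instance resource:
#   subnet_id = "${module.networking.PrivSubnetAD1_id}"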
End Update 9/26/17

Most likely there is a better way to do this, but since I spent some time on it I am jotting down my notes. When creating compute instances with terraform in Oracle Cloud Infrastructure (Oracle Bare Metal Services) you have to specify the subnet_id. The id, or OCID as it is called in OCI, is a long unique string.

So if you are looking at automating the terraform build, you may struggle with not knowing the subnet_id when creating a compute instance. As I said, there may be better ways to do this, and maybe the AWS plugin for terraform handles this already. I did this with the OCI plugin and came up with the script below, which uses some custom API calls to generate terraform variables for the subnets. I am only showing the bash script to give an idea of the flow and how it glues together; the terraform source and API calls are not shown here.

#!/bin/bash

build_root="/home/rrosso/.terraform.d/MY-PROTO"
api_snippets_location="/home/rrosso/oraclebmc"
terraform_bin="/home/rrosso/.terraform.d/terraform"

function pause(){
   read -p "$*"
}

## Check if environment variables are set. Make very sure correct tenancy, compartment etc
## Or if hard coding these look in main.tf files for correct tenancy, compartment etc
env | grep TF_

pause "Press [Enter] key if Variables look ok and if ready to proceed with networking build"

cd "$build_root/networking"
$terraform_bin apply

pause "Press [Enter] key if networking build went ok and ready to generate a list of subnet ocid's"

cd "$api_snippets_location"
python get_subnet_ocids.py -t ocid1.tenancy.oc1..<cut_long_number_here> -c MYPROTO -v DEV >> $build_root/compute/webservers/variables.tf 
python get_subnet_ocids.py -t ocid1.tenancy.oc1..<cut_long_number_here> -c MYPROTO -v DEV >> $build_root/compute/bastions/variables.tf 

pause "Press [Enter] key if variables.tf looks ok with new subnet ocid's and ready to proceed building compute instances"

cd "$build_root/compute"
$terraform_bin apply


Sep 19

RDP Through SSH Server

Sometimes it becomes necessary to access Windows hosts that are not exposed externally, when you do have an SSH server exposed as a "jumphost". Quick notes on my usage.

Create the tunnel through the jumphost:

$ ssh -p 22 -L 13389:10.3.1.4:3389 -i my-ssh-key user@<public-IP>
Last login: Tue Sep 19 16:49:54 2017

Connect using RDP to the local host:port.

$ rdesktop localhost:13389
Autoselected keyboard map en-us
Failed to negotiate protocol, retrying with plain RDP.
WARNING: Remote desktop does not support colour depth 24; falling back to 16
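
If rdesktop cannot negotiate with a newer Windows host, FreeRDP's xfreerdp client may work as an alternative (untested in this setup):

$ xfreerdp /u:user /v:localhost:13389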

Example script...

$ cat rdesktop_jumphost.sh 
#!/bin/bash
#
#: Script Name  : rdesktop_jumphost.sh
#: Version      : 0.1.3
#: Author       : Riaan Rossouw
#: Date Created : October 21, 2017
#: Date Updated : October 22, 2017
#: Description  : Use ssh config file to pull enough information to rdp to windows servers through a ssh jumphost
#: Examples     : rdesktop_jumphost.sh -F configfile -u user -g 1024x768

usage()
{
cat << EOF
usage: $0 options

This script uses an ssh config file to pull enough information to rdp to windows servers through a ssh jumphost

OPTIONS:
           -h show this message.
           -F ssh config file (required).
           -s servername (HostName in ssh config) (required).
           -u pass username to rdesktop.
           -g desktop geometry (WxH).
EOF
}

while getopts "hF:s:u:g:" OPTION
 do
  case $OPTION in
   h) usage; exit 1;;
   F) configfile=$OPTARG;;
   s) HostName=$OPTARG;;
   u) username=$OPTARG;;
   g) geometry=$OPTARG;;
   \?) usage; exit 1;;
  esac
 done

NUMARGS=$#
if [ $NUMARGS -eq 0 ]; then
  usage
  exit 1
fi

PARAMS="-u $username"
PARAMS+=" -g $geometry"

localRdpPort=33389

privateIP=$(awk  "/^Host ${HostName}$/{x=1}x&&/HostName/{print \$2;exit}" ~/.ssh/$configfile)
jumphost=$(awk "/^Host ${HostName}$/{x=1}x&&/ProxyJump/{print \$2;exit}" ~/.ssh/$configfile)

if [ -z "$jumphost" ]
then
  rdesktop $PARAMS $privateIP:3389
else
  jumphostIP=$(awk "/^Host ${jumphost}$/{x=1}x&&/HostName/{print \$2;exit}" ~/.ssh/$configfile)
  jumpuser=$(awk "/^Host ${jumphost}$/{x=1}x&&/User/{print \$2;exit}" ~/.ssh/$configfile)
  identityfile=$(awk "/^Host ${jumphost}$/{x=1}x&&/IdentityFile/{print \$2;exit}" ~/.ssh/$configfile)
  ssh -f -N -p 22 -L $localRdpPort:$privateIP:3389 -i $identityfile $jumpuser@$jumphostIP
  tunnelpid=$(ps -ef | grep $localRdpPort | grep -v grep | awk '{print $2}')
  rdesktop $PARAMS localhost:$localRdpPort
  kill $tunnelpid
fi


Jun 08

Ping with timestamp

Since I am always looking for this in my notes, I am adding the snippet here for reference. It is handy for checking how long a reboot takes, for example.

$ ping server1 | xargs -L 1 -I '{}' date '+%Y-%m-%d %H:%M:%S: {}'
2017-06-08 07:13:21: PING server1 (10.1.10.31) 56(84) bytes of data.
2017-06-08 07:13:21: 64 bytes from 10.1.10.31 (10.1.10.31): icmp_seq=1 ttl=246 time=113 ms
2017-06-08 07:13:22: 64 bytes from 10.1.10.31 (10.1.10.31): icmp_seq=2 ttl=246 time=112 ms
2017-06-08 07:13:23: 64 bytes from 10.1.10.31 (10.1.10.31): icmp_seq=3 ttl=246 time=112 ms
2017-06-08 07:13:24: 64 bytes from 10.1.10.31 (10.1.10.31): icmp_seq=4 ttl=246 time=111 ms
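
On Linux, iputils ping also has a -D flag that prefixes each line with an epoch timestamp, if a raw timestamp is enough:

$ ping -D server1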


Apr 30

ZFSSA List Replication Actions Status

Using the ZFS appliance REST API to take a quick look at all replication actions and check on the progress of long-running jobs.

# python zfssa_status_replication_v1.0.py

List Replicated Project Snapshots -- PST Run Date 2017-04-30 06:42:59.386738
date                       target project    pool      bytes_sent      estimated_size  estimated_time_left average_throughput
2017-04-30 07:20:04.232975 zfs2   EBSPRD     POOL1     6.78G           21.3G           01:00:35            4MB/s
2017-04-30 06:42:59.386738 zfs3   EBSPRD     POOL2     0               0               00:00:00            0B/s           
<snip>
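The same endpoint can be spot checked with curl before scripting, using the URL and read-only user from the script below:

# curl -sk -u ROuser https://zfs1:215/api/storage/v1/replication/actions
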
# cat zfssa_status_replication_v1.0.py 
#!/usr/bin/env python

# Version 1.0
import sys
import requests, json, os
import datetime

requests.packages.urllib3.disable_warnings()
dt = datetime.datetime.now()

# ZFSSA API URL
url = "https://zfs1:215"

# ZFSSA authentication credentials; ideally read the username and password from the environment variables ZFSUSER and ZFSPASSWORD
#zfsauth = (os.getenv('ZFSUSER'), os.getenv('ZFSPASSWORD'))
zfsauth = ('ROuser','password')

jsonheader={'Content-Type': 'application/json'}

def list_replication_actions_status():
  r = requests.get("%s/api/storage/v1/replication/actions" % (url), auth=zfsauth, verify=False, headers=jsonheader)
  if r.status_code != 200:
    print("Error getting actions %s %s" % (r.status_code, r.text))
  else:
   j = json.loads(r.text)
   #print j
   for action in j["actions"]:
     #print action
     print("{} {:15} {:10} {:15} ".format(dt, action["target"], action["project"], action["pool"])),
     show_one_replication_action(action["id"])

def show_one_replication_action(id):
  r = requests.get("%s/api/storage/v1/replication/actions/%s" % (url,id), auth=zfsauth, verify=False, headers=jsonheader)
  if r.status_code != 200:
    print("Error getting status %s %s" % (r.status_code, r.text))
  else:
   j = json.loads(r.text)
   #print j
   print("{:15} {:15} {:19} {:15}".format(j["action"]["bytes_sent"], j["action"]["estimated_size"], j["action"]["estimated_time_left"], j["action"]["average_throughput"]))

print ("\nList Replicated Project Snapshots -- PST Run Date %s" % dt)
print('{:26} {:15} {:10} {:16} {:15} {:15} {:16} {}'.format('date','target','project','pool','bytes_sent','estimated_size','estimated_time_left','average_throughput'))
list_replication_actions_status()


Apr 26

Keystonejs Migrate from WordPress Prototype

Since I put some effort into looking at possible solutions for blogging using Node/MongoDB as the underlying platform, I am documenting a test I did with Keystonejs. Its website describes it as a Node.js CMS & Web Application Platform.

You need Node.js 0.10+ and MongoDB v2.4+. High level steps on Ubuntu 17.04 are as follows:
1. Setup MongoDB from package management.
2. Install a current version of node in /usr/local. Not using the version from the Ubuntu repository.
3. Install the keystonejs generator (using npm) and run the generator (using yo).
4. Export the WordPress xml.
5. Convert the WordPress xml to MongoDB import format (json). Used Python ElementTree.
6. Run a mongo import of the json.

STEP 1: Setup MongoDB from package management.

# apt-get install -y mongodb-org
# systemctl start mongod

STEP 2: Install a current version of node in /usr/local. Not using the version from the Ubuntu repository.
Grabbed from here: https://nodejs.org/dist/v7.9.0/node-v7.9.0-linux-x64.tar.xz

$ sudo mv node-v7.9.0-linux-x64/ /usr/local/
$ export PATH=$PATH:/usr/local/node-v7.9.0-linux-x64/bin

$ npm version
{ npm: '4.2.0',
<snip>

$ node -v
v7.9.0

STEP 3: Install keystonejs generator(using npm) and run generator(using yo).

$ npm install -g generator-keystone
$ npm install -g yo

$ cd test-project/
$ yo keystone
Your KeystoneJS project is ready to go!
<snip>

$ cd my-site/
$ node keystone
Applying update 0.0.1-admins...
Successfully created:
*   1 User
KeystoneJS Started:
My Site is ready on http://0.0.0.0:3000
------------------------------------------------

STEP 4: Export the WordPress xml.

STEP 5: Convert the WordPress xml to MongoDB import format (json). Used Python ElementTree and input file riaan039ssysadminblog.wordpress.2017-04-25.xml.

$ python xmlconvert6.py > 6.json

STEP 6: Run the mongo import of the json.

$ mongoimport --db my-site --collection posts  --drop --file 6.json
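
A quick way to sanity check that the documents landed, assuming the mongo shell is installed:

$ mongo my-site --eval 'db.posts.count()'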

As mentioned, this is a prototype and not really an option for me at this point. The xmlconvert.py script will need a lot of work to make the posts convert more cleanly and keep their formatting. Also, this is not a full featured blog application like WordPress, but rather a framework to build on. So for me to replace my blogs, I am missing things like code highlighting, permalinks, etc.

Example code for WordPress xml to MongoDB import json format.

import xml.etree.ElementTree as ET
from datetime import datetime
#https://docs.python.org/2/library/xml.etree.elementtree.html

#### Get to this kind of format for mongo import
#### { "slug" : "post-2", "title" : "Post 2", "categories" : [ ], "state" : "published", "__v" : 0, "content" : { "brief" : "<p>Blah 2</p>", "extended" : "" }, "publishedDate" : ISODate("2017-04-25T05:00:00Z") }

it = ET.iterparse('riaan039ssysadminblog.wordpress.2017-04-25.xml')
for _, el in it:
  #if "{http://wordpress.org/export/1.2/}" in el.tag:
  if "{" in el.tag:
    el.tag = el.tag.split('}', 1)[1]  # strip all namespaces

tree = it.root

channel = tree.find("channel")

i=0

for item in channel.findall('item'):
  i = i + 1
  slug = 'slug' + str(i)
  title = item.find('title').text
  if not title:
    title = 'no title'
  title = title.replace('"','')
  #<category domain="category" nicename="kvm"><![CDATA[KVM]]></category>
  #<category domain="category" nicename="lvm"><![CDATA[LVM]]></category>
  ## TBD: Convert categories just leaving empty for now
  categories=[]

  state=item.find('status').text
  if state == "publish":
    state = "published"
  v=0

  ## "content" : { "brief" : "<p>Test1 Brief</p>", "extended" : "<p>Test1 Extended</p>" }
  content=item.find('encoded').text
  if not content:
    content=" "
  content=content.encode('utf-8')
  content=content.replace('\n', '<br>')
  content=content.replace('\t', ' ')
  #content=content.replace('<', ' ')
  #content=content.replace('>', ' ')
  #content=content.replace('</', '<//')
  content=content.replace('"', ' ')
  content=content.replace("\\", "/")   # This is a junk cheat for now.  Need a better way for C:\Program Files\...

  content_brief=content[:30]
  content_extended=content

  ## Convert Tue, 06 Nov 2012 08:49:21 +0000 to look like this: ISODate("2017-04-25T05:00:00Z")
  d2 = item.find('pubDate').text.split(' +0000')
  d1 = d2[0].strip()
  #######pubDate = item.get('pubDate')
  #######publishedDate="2017-04-25T05:00:00Z"
  #d = datetime.strptime(d1, '%a, %d %b %Y %H:%M:%S')
  #publishedDate = d.strftime('%Y-%m-%dT%H:%M:%SZ')

  # pubDate form WordPress xml had some odd dates that could not be used with strptime so using post_date
  #<wp:post_date><![CDATA[2012-11-06 20:28:06]]></wp:post_date>
  d1 = item.find('post_date').text 
  d = datetime.strptime(d1, '%Y-%m-%d %H:%M:%S')
  publishedDate = d.strftime('%Y-%m-%dT%H:%M:%SZ')

  print ' {{ "slug": "{}", "title": "{}", "categories": {}, "state": "{}", "__v": {}, "content": {{ "brief":"<p>{}</p>", "extended":"<p>{}</p>" }}, "publishedDate": ISODate("{}") }}'.format(slug, title, categories, state, v, content_brief, content_extended, publishedDate)


Apr 17

Linux Mount nfsv4.2

Just a quick test on using nfs v4.2. This test was on an Ubuntu 17.04 server as well as the client.

# cat /etc/exports 
<snip>
/DATA	*(ro,sync,no_root_squash,insecure)
/home   192.168.1.43(rw,insecure)

# systemctl restart nfs-kernel-server

# more /proc/fs/nfsd/versions 
+2 +3 +4 +4.1 +4.2

# mount -t nfs -o minorversion=2 server1:/DATA /DATA
# nfsstat -m
/mnt/home from server1:/home
 Flags:	rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.42,mountvers=3,mountport=41341,mountproto=udp,local_lock=none,addr=192.168.1.42

/DATA from server1:/DATA
 Flags:	rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=10.0.3.15,local_lock=none,addr=192.168.1.42
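
Note: recent mount.nfs also accepts the minor version directly in the vers option:

# mount -t nfs -o vers=4.2 server1:/DATA /DATA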

# rsync -a --progress ubuntu-17.04-desktop-amd64.iso /DATA/DATABANK/iso/
sending incremental file list
ubuntu-17.04-desktop-amd64.iso
  1,609,039,872 100%  157.75MB/s    0:00:09 (xfr#1, to-chk=0/1)
