
Some noteworthy examples of oafp usage, organized by category:

Mixing with Unix commands

Commands:

# Creates a data.ndjson file where each record is built from one of the JSON files in /some/data
find /some/data -name "*.json" -exec oafp {} output=json \; > data.ndjson
# Parse /proc/cpuinfo into an array
cat /proc/cpuinfo | sed "s/^$/---/mg" | ./oafp in=yaml path="[?not_null(@)]" out=ctree
# Parse the result of the ls command
ls -lad --time-style="+%Y-%m-%d %H:%M" * | oafp in=lines path="map(&split_re(@,'\\s+').{permissions:[0],id:[1],user:[2],group:[3],size:[4],date:[5],time:[6],file:[7]},[])" linesjoin=true out=ctable
# Parse the result of the route command
route | sed "1d" | oafp in=lines linesjoin=true linesvisual=true linesvisualsepre="\s+" out=ctable
# Parse the ‘ip tcp_metrics’ command output
ip tcp_metrics | sed 's/^/target: /g' | sed 's/$/\n\n---\n/g' | sed 's/ \([a-z]*\) /\n\1: /g' | head -n -2 | oafp in=yaml path="[].{target:target,age:from_timeAbbr(replace(age,'[sec|\.]','','')),cwnd:cwnd,rtt:from_timeAbbr(rtt),rttvar:from_timeAbbr(rttvar),source:source}" sql="select * order by target" out=ctable
# Parse the ‘arp’ command output
arp | oafp in=lines linesvisual=true linesjoin=true out=ctable
# Loop over the current active network connections
oafp cmd="netstat -tun | sed \"1d\"" in=lines linesvisual=true linesjoin=true linesvisualsepre="\\s+(\\?\!Address)" out=ctable loop=1

Manipulate text

Command:

# Extract a song's lyrics from a JSON API response (skipping the first header line of the lyrics field)
curl -s https://api.lyrics.ovh/v1/Coldplay/Viva%20La%20Vida | oafp path="substring(lyrics,index_of(lyrics, '\n'),length(lyrics))"

Result:

I used to rule the world
[...]
Oooooh Oooooh Oooooh
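
Since path expressions support the split and length functions used elsewhere on this page, the same payload can also be reduced to a simple metric. A minimal, untested sketch, assuming the lyrics field uses '\n' line separators:

# Count the number of lines in the lyrics (sketch)
curl -s https://api.lyrics.ovh/v1/Coldplay/Viva%20La%20Vida | oafp path="length(split(lyrics,'\n'))"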

Parsing UNIX commands

Commands:

# Converting the Unix systemctl list-timers output
systemctl list-timers | head -n -3 | oafp in=lines linesvisual=true linesjoin=true out=ctable
# Converting the Unix systemctl list-units output
systemctl list-units | head -n -6 | oafp in=lines linesvisual=true linesjoin=true path="[].delete(@,'')" out=ctable
# Converting the Unix systemctl list-units output into an overview table
systemctl list-units | head -n -6 | oafp in=lines linesvisual=true linesjoin=true path="[].delete(@,'')" sql="select \"LOAD\", \"ACTIVE SUB\", count(1) as \"COUNT\" group by \"LOAD\", \"ACTIVE SUB\"" sqlfilter=advanced out=ctable
# Converting the Unix df output
df --output=target,fstype,size,used,avail,pcent | tail -n +2 | oafp in=lines linesjoin=true path="[].split_re(@, ' +').{filesystem:[0],type:[1],size:[2],used:[3],available:[4],use:[5]}" out=ctable
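
Assuming the abbreviated SQL filter shown above also accepts a where clause (an assumption, not demonstrated in the original examples), the list-units parse can be narrowed to failed units:

# Show only failed units (sketch; assumes sqlfilter=advanced supports a where clause)
systemctl list-units | head -n -6 | oafp in=lines linesvisual=true linesjoin=true path="[].delete(@,'')" sql="select * where \"ACTIVE SUB\" like 'failed%'" sqlfilter=advanced out=ctable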

Parsing UNIX files

Commands:

# Converting a Unix syslog file into JSON output
cat syslog | oafp in=raw path="split(trim(@),'\n').map(&split(@, ' ').{ date: concat([0],concat(' ',[1])), time: [2], host: [3], process: [4], message: join(' ',[5:]) }, [])"
# Converting /etc/os-release to SQL insert statements
oafp cmd="cat /etc/os-release" in=ini outkey=release path="[@]" sql="select '$HOSTNAME' \"HOST\", *" out=sql sqlnocreate=true

Command:

# Parses /etc/passwd into a table ordered by uid and gid
oafp cmd="cat /etc/passwd" in=csv inputcsv="(withHeader: false, withDelimiter: ':')" path="[].{user:f0,pass:f1,uid:to_number(f2),gid:to_number(f3),description:f4,home:f5,shell:f6}" sql="select * order by uid,gid" out=ctable

# or
oafp cmd="cat /etc/passwd" in=csv inputcsv="(withHeader: false, withDelimiter: ':')" path="[].{user:f0,pass:f1,uid:to_number(f2),gid:to_number(f3),description:f4,home:f5,shell:f6}" out=json | oafp from="notStarts(user, '#').sort(uid, gid)" out=ctable

Result:

  user  │pass│ uid │ gid │           description            │     home      │      shell      
────────┼────┼─────┼─────┼──────────────────────────────────┼───────────────┼─────────────────
root    │x   │0    │0    │root                              │/root          │/bin/bash        
daemon  │x   │1    │1    │daemon                            │/usr/sbin      │/usr/sbin/nologin
bin     │x   │2    │2    │bin                               │/bin           │/usr/sbin/nologin
sys     │x   │3    │3    │sys                               │/dev           │/usr/sbin/nologin
sync    │x   │4    │65534│sync                              │/bin           │/bin/sync        
games   │x   │5    │60   │games                             │/usr/games     │/usr/sbin/nologin
man     │x   │6    │12   │man                               │/var/cache/man │/usr/sbin/nologin
lp      │x   │7    │7    │lp                                │/var/spool/lpd │/usr/sbin/nologin
mail    │x   │8    │8    │mail                              │/var/mail      │/usr/sbin/nologin
news    │x   │9    │9    │news                              │/var/spool/news│/usr/sbin/nologin
uucp    │x   │10   │10   │uucp                              │/var/spool/uucp│/usr/sbin/nologin
proxy   │x   │13   │13   │proxy                             │/bin           │/usr/sbin/nologin
www-data│x   │33   │33   │www-data                          │/var/www       │/usr/sbin/nologin
backup  │x   │34   │34   │backup                            │/var/backups   │/usr/sbin/nologin
list    │x   │38   │38   │Mailing List Manager              │/var/list      │/usr/sbin/nologin
irc     │x   │39   │39   │ircd                              │/run/ircd      │/usr/sbin/nologin
gnats   │x   │41   │41   │Gnats Bug-Reporting System (admin)│/var/lib/gnats │/usr/sbin/nologin
_apt    │x   │100  │65534│                                  │/nonexistent   │/usr/sbin/nologin
nobody  │x   │65534│65534│nobody                            │/nonexistent   │/usr/sbin/nologin
[#19 rows]
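
The same CSV approach with a ':' delimiter should apply to other colon-separated files. An untested sketch for /etc/group, assuming the standard group(5) field layout:

# Parse /etc/group into a table ordered by gid (sketch)
oafp cmd="cat /etc/group" in=csv inputcsv="(withHeader: false, withDelimiter: ':')" path="[].{group:f0,gid:to_number(f2),members:f3}" sql="select * order by gid" out=ctable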

External APIs

Commands:

# Generating a simple table with details of the current public IP address
curl -s https://ifconfig.co/json | oafp flatmap=true out=map
# Converting the Cloudflare DNS trace info
curl -s https://1.1.1.1/cdn-cgi/trace | oafp in=ini out=ctree
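
Any JSON REST endpoint can be handled the same way. As a small sketch (assuming the ifconfig.co payload includes a country field), a path expression can pick a single value instead of flattening the whole map:

# Print just the country reported for the current public IP address (sketch)
curl -s https://ifconfig.co/json | oafp path=country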

Grids

Commands:

# Grid with Java's threads, class loaders, heap and metaspace from a Java's hsperf file
oafp /tmp/hsperfdata_user/12345 in=hsperf path=java out=grid grid="[[(title:Threads,type:chart,obj:'int threads.live:green:live threads.livePeak:red:peak threads.daemon:blue:daemon -min:0')|(title:Class Loaders,type:chart,obj:'int cls.loadedClasses:blue:loaded cls.unloadedClasses:red:unloaded')]|[(title:Heap,type:chart,obj:'bytes __mem.total:red:total __mem.used:blue:used -min:0')|(title:Metaspace,type:chart,obj:'bytes __mem.metaTotal:blue:total __mem.metaUsed:green:used -min:0')]]" loop=1
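
The grid parameter appears to describe rows of panels: the outer [...] holds rows separated by '|', and each row is itself a [...] of panels, each with a title, a type and an obj spec. A reduced, untested sketch reusing only pieces of the example above (a single row with two chart panels):

# Single-row grid with heap and live-thread charts from the same hsperf file (sketch)
oafp /tmp/hsperfdata_user/12345 in=hsperf path=java out=grid grid="[[(title:Heap,type:chart,obj:'bytes __mem.total:red:total __mem.used:blue:used -min:0')|(title:Threads,type:chart,obj:'int threads.live:green:live -min:0')]]" loop=1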

Using JSON schemas

Command:

# Get a list of schemas
oafp cmd="curl https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/api/json/catalog.json" path="schemas[].{name:name,description:description,files:to_string(fileMatch)}" out=ctable

Result:

                             name                              │                                                description                                                 │                                         files                                          
───────────────────────────────────────────────────────────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────────────────────────────────────────────────
1Password SSH Agent Config                                     │Configuration file for the 1Password SSH agent                                                              │["**/1password/ssh/agent.toml"]                                                         
Application Accelerator                                        │Application Accelerator for VMware Tanzu                                                                    │["accelerator.yaml"]                                                                    
AnyWork Automation Configuration                               │AnyWork Automation Configuration used to configure automation scripts on AnyWork                            │[".awc.yaml",".awc.yml",".awc.json",".awc.jsonc",".awc"]                                
[...]
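
Since the path parameter accepts JMESPath-style filters (as in the /proc/cpuinfo example), the catalog can also be narrowed before tabulating. An untested sketch where the 'Kubernetes' search term is just an illustration:

# List only the schemas whose name contains a given term (sketch)
oafp cmd="curl https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/api/json/catalog.json" path="schemas[?contains(name,'Kubernetes')].{name:name,description:description,files:to_string(fileMatch)}" out=ctable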

Using Docker

Command:

oafp cmd="docker ps --format json" input=ndjson ndjsonjoin=true path="[].{id:ID,name:Names,state:State,image:Image,networks:Networks,ports:Ports,Status:Status}" sql="select * order by networks,state,name" output=ctable

Result:

     id     │          name          │ state │            image             │       networks       │                         ports                         │  Status  
────────────┼────────────────────────┼───────┼──────────────────────────────┼──────────────────────┼───────────────────────────────────────────────────────┼──────────
af3adb5b8349│registry                │running│registry:2                    │bridge,k3d-k3s-default│0.0.0.0:5000->5000/tcp                                 │Up 2 hours
cba6e3807b44│k3d-k3s-default-server-0│running│rancher/k3s:v1.27.4-k3s1      │k3d-k3s-default       │                                                       │Up 2 hours
b775ad480764│k3d-k3s-default-serverlb│running│ghcr.io/k3d-io/k3d-proxy:5.6.0│k3d-k3s-default       │80/tcp, 0.0.0.0:1080->1080/tcp, 0.0.0.0:45693->6443/tcp│Up 2 hours
[#3 rows]
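
The same NDJSON handling should work for other docker subcommands that support --format json (whether they do depends on the Docker CLI version in use). An untested sketch listing local images:

# List local images as a table (sketch)
oafp cmd="docker image ls --format json" input=ndjson ndjsonjoin=true path="[].{id:ID,repository:Repository,tag:Tag,size:Size}" sql="select * order by repository" output=ctable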

Using Kubectl

Command:

oafp cmd="kubectl get pods -A -o json" path="items[].{ns:metadata.namespace,kind:metadata.ownerReferences[].kind,name:metadata.name,status:status.phase,restarts:sum(status.containerStatuses[].restartCount),node:spec.nodeName,age:timeago(status.startTime)}" sql="select * order by status,name" output=ctable

Result:

    ns     │   kind   │                 name                 │ status  │restarts│          node          │     age      
───────────┼──────────┼──────────────────────────────────────┼─────────┼────────┼────────────────────────┼──────────────
kube-system│ReplicaSet│coredns-77ccd57875-5m44t              │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│ReplicaSet│local-path-provisioner-957fdf8bc-24hmf│Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│ReplicaSet│metrics-server-648b5df564-hzbwb       │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│ReplicaSet│socks-server-d7c8c4d78-r6jc9          │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│DaemonSet │svclb-socks-server-78b973ca-zvf58     │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│DaemonSet │svclb-traefik-e1776788-7z2gf          │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│ReplicaSet│traefik-64f55bb67d-g2vps              │Running  │0       │k3d-k3s-default-server-0│66 minutes ago
kube-system│Job       │helm-install-traefik-6j5zx            │Succeeded│1       │k3d-k3s-default-server-0│66 minutes ago
kube-system│Job       │helm-install-traefik-crd-z59fs        │Succeeded│0       │k3d-k3s-default-server-0│66 minutes ago
[#9 rows]

Command:

oafp cmd="kubectl get nodes -o json" path="items[].{node:metadata.name,totalCPU:status.capacity.cpu,allocCPU:status.allocatable.cpu,totalMem:to_bytesAbbr(from_bytesAbbr(status.capacity.memory)),allocMem:to_bytesAbbr(from_bytesAbbr(status.allocatable.memory)),totalStorage:to_bytesAbbr(from_bytesAbbr(status.capacity.\"ephemeral-storage\")),allocStorage:to_bytesAbbr(to_number(status.allocatable.\"ephemeral-storage\")),conditions:join(\`, \`,status.conditions[].reason)}" output=ctable

Result:

          node          │totalCPU│allocCPU│totalMem│allocMem│totalStorage│allocStorage│                                        conditions                                         
────────────────────────┼────────┼────────┼────────┼────────┼────────────┼────────────┼───────────────────────────────────────────────────────────────────────────────────────────
k3d-k3s-default-server-0│4       │4       │3.85 GB │3.85 GB │77.6 GB     │73.8 GB     │KubeletHasSufficientMemory, KubeletHasNoDiskPressure, KubeletHasSufficientPID, KubeletReady
[#1 row]
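
A similar projection should work for other kubectl resources. An untested sketch for services, assuming the standard Service schema fields:

# Build an output table from kubectl get svc with namespace, name, type, cluster IP and ports (sketch)
oafp cmd="kubectl get svc -A -o json" path="items[].{ns:metadata.namespace,name:metadata.name,type:spec.type,clusterIP:spec.clusterIP,ports:to_slon(spec.ports)}" sql="select * order by ns, name" output=ctable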

Command:

# Build an output table from kubectl get pods with namespace, pod name, container name and corresponding resources
kubectl get pods -A -o json | oafp path="items[].amerge({ ns: metadata.namespace, pod: metadata.name }, spec.containers[].{ container: name, resources: to_slon(resources) })[]" sql="select ns, pod, container, resources order by ns, pod, container" out=ctable

Result:

    ns     │                 pod                  │      container       │                           resources
───────────┼──────────────────────────────────────┼──────────────────────┼────────────────────────────────────────────────────────────────
kube-system│coredns-77ccd57875-25jr4              │coredns               │[(limits: (memory: 170Mi), requests: (cpu: 100m, memory: 70Mi))]
kube-system│svclb-socks-server-00fc08b8-zgw8m     │lb-tcp-1080           │[()]                                                            
kube-system│local-path-provisioner-957fdf8bc-7vc8g│local-path-provisioner│[()]                                                            
kube-system│metrics-server-648b5df564-prq2r       │metrics-server        │[(requests: (cpu: 100m, memory: 70Mi))]                         
kube-system│svclb-traefik-f37ea49c-tj7lw          │lb-tcp-80,lb-tcp-443  │[() | ()]                                                       
kube-system│helm-install-traefik-crd-xpnrr        │helm                  │[()]                                                            
kube-system│helm-install-traefik-2f4xn            │helm                  │[()]                                                            
kube-system│socks-server-d7c8c4d78-vs4vj          │oaf                   │[()]                                                            
kube-system│traefik-64f55bb67d-tllw7              │traefik               │[()]                                                            
[#9 rows]

Command:

# Build an output table from kubectl get pods with node, namespace, pod name, container name and corresponding resources
kubectl get pods -A -o json | oafp path="items[].amerge({ node: spec.nodeName, ns: metadata.namespace, pod: metadata.name }, spec.containers[].{ container: name, resources: to_slon(resources) })[]" sql="select node, ns, pod, container, resources order by node, ns, pod, container" out=ctable

Result:

          node          │    ns     │                 pod                  │      container       │                          resources                           
────────────────────────┼───────────┼──────────────────────────────────────┼──────────────────────┼──────────────────────────────────────────────────────────────
k3d-k3s-default-server-0│kube-system│coredns-77ccd57875-659xc              │coredns               │(limits: (memory: 170Mi), requests: (cpu: 100m, memory: 70Mi))
k3d-k3s-default-server-0│kube-system│helm-install-traefik-b6l27            │helm                  │()
k3d-k3s-default-server-0│kube-system│helm-install-traefik-crd-npmc8        │helm                  │()
k3d-k3s-default-server-0│kube-system│local-path-provisioner-957fdf8bc-kfxkr│local-path-provisioner│()                                                            
k3d-k3s-default-server-0│kube-system│metrics-server-648b5df564-vmjrw       │metrics-server        │(requests: (cpu: 100m, memory: 70Mi))                         
k3d-k3s-default-server-0│kube-system│socks-server-d7c8c4d78-tpwg6          │oaf                   │()
k3d-k3s-default-server-0│kube-system│svclb-socks-server-dc24b0be-dx4px     │lb-tcp-1080           │()
k3d-k3s-default-server-0│kube-system│svclb-traefik-9ef3fc4c-sj2ln          │lb-tcp-443            │()
k3d-k3s-default-server-0│kube-system│svclb-traefik-9ef3fc4c-sj2ln          │lb-tcp-80             │()
k3d-k3s-default-server-0│kube-system│traefik-64f55bb67d-s6fvf              │traefik               │()
[#10 rows]

Using Excel

Command:

# Processes each JSON file in /some/data, creating or updating the data.xlsx file with a sheet per file
find /some/data -name "*.json" | xargs -I '{}' /bin/sh -c 'oafp file={} output=xls xlsfile=data.xlsx xlsopen=false xlssheet=$(echo {} | sed "s/.*\/\(.*\)\.json/\1/g" )'
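
The xls output also works for a single input file. A small untested sketch where data.csv and the sheet name are hypothetical:

# Convert a CSV file into a data.xlsx sheet (sketch)
oafp data.csv in=csv out=xls xlsfile=data.xlsx xlssheet=data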

Using ChatGPT

Command:

# Set up the LLM model and gather the data into a data.json file
export OAFP_MODEL="(type: openai, model: gpt-3.5-turbo, key: ..., timeout: 900000)"
echo "list all United Nations secretaries with their corresponding 'name', their mandate 'begin date', their mandate 'end date' and their corresponding secretary 'numeral'" | oafp input=llm output=json > data.json

Result:


oafp data.json
─ secretaries ╭ [0] ╭ name      : Trygve Lie 
              │     ├ begin date: 1946-02-01 
              │     ├ end date  : 1952-11-10 
              │     ╰ numeral   : 1 
              ├ [1] ╭ name      : Dag Hammarskjöld 
              │     ├ begin date: 1953-04-10 
              │     ├ end date  : 1961-09-18 
              │     ╰ numeral   : 2 
              ├ [2] ╭ name      : U Thant 
              │     ├ begin date: 1961-11-30 
              │     ├ end date  : 1971-12-31 
              │     ╰ numeral   : 3 
              ├ [3] ╭ name      : Kurt Waldheim 
              │     ├ begin date: 1972-01-01 
              │     ├ end date  : 1981-12-31 
              │     ╰ numeral   : 4 
              ├ [4] ╭ name      : Javier Pérez de Cuéllar 
              │     ├ begin date: 1982-01-01 
              │     ├ end date  : 1991-12-31 
              │     ╰ numeral   : 5 
              ├ [5] ╭ name      : Boutros Boutros-Ghali 
              │     ├ begin date: 1992-01-01 
              │     ├ end date  : 1996-12-31 
              │     ╰ numeral   : 6 
              ├ [6] ╭ name      : Kofi Annan 
              │     ├ begin date: 1997-01-01 
              │     ├ end date  : 2006-12-31 
              │     ╰ numeral   : 7 
              ├ [7] ╭ name      : Ban Ki-moon 
              │     ├ begin date: 2007-01-01 
              │     ├ end date  : 2016-12-31 
              │     ╰ numeral   : 8 
              ╰ [8] ╭ name      : António Guterres 
                    ├ begin date: 2017-01-01 
                    ├ end date  : present 
                    ╰ numeral   : 9 
oafp data.json path=secretaries output=ctable
         name          │begin date│ end date │numeral
───────────────────────┼──────────┼──────────┼───────
Trygve Lie             │1946-02-01│1952-11-10│1      
Dag Hammarskjöld       │1953-04-10│1961-09-18│2      
U Thant                │1961-11-30│1971-12-31│3      
Kurt Waldheim          │1972-01-01│1981-12-31│4      
Javier Pérez de Cuéllar│1982-01-01│1991-12-31│5      
Boutros Boutros-Ghali  │1992-01-01│1996-12-31│6      
Kofi Annan             │1997-01-01│2006-12-31│7      
Ban Ki-moon            │2007-01-01│2016-12-31│8      
António Guterres       │2017-01-01│present   │9      
[#9 rows]

Using Ollama

Command:

export OAFP_MODEL="(type: ollama, model: 'mistral:instruct', url: 'https://models.local', timeout: 900000)"
echo "Output a JSON array with 15 cities where each entry has the 'city' name, the estimated population and the corresponding 'country'" | oafp input=llm output=json > data.json
oafp data.json output=ctable sql="select * order by population desc"

Result:


   city    │population│ country  
───────────┼──────────┼──────────
Shanghai   │270584000 │China     
Tokyo      │37436958  │Japan     
Delhi      │30290936  │India     
São Paulo  │21935296  │Brazil    
Beijing    │21516000  │China     
Mexico City│21402981  │Mexico    
Mumbai     │20712874  │India     
Cairo      │20636449  │Egypt     
Osaka      │19365701  │Japan     
Dhaka      │18568373  │Bangladesh
[#10 rows]

Using DB

H2

Commands:

# Store the json result of a command into an H2 database table
oaf -c "\$o(listFilesRecursive('.'),{__format:'json'})" | oafp out=db dbjdbc="jdbc:h2:./data" dbuser=sa dbpass=sa dbtable=data
# Perform a SQL query over an H2 database
echo "select * from \"data\"" | oafp in=db indbjdbc="jdbc:h2:./data" indbuser=sa indbpass=sa out=ctable

SQLite

Commands:

# Retrieve and install the JDBC driver for SQLite
ojob ojob.io/db/getDriver op=install db=sqlite
# Store the json result in a SQLite database table
oaf -c "\$o(listFilesRecursive('.'),{__format:'json'})" | oafp out=db dbjdbc="jdbc:sqlite:data.db" dbtable=data dblib=sqlite
# Perform a SQL query over the SQLite database using JDBC
echo "select * from data" | oafp in=db indbjdbc="jdbc:sqlite:data.db" indbtable=data indblib=sqlite out=ctable

Using CH

Etcd

Commands:

# Copy the json result of a command into an etcd database
oaf -c "\$o(io.listFiles('.').files,{__format:'json'})" | oafp out=ch ch="(type: etcd3, options: (host: localhost, port: 2379), lib: 'etcd3.js')" chkey=canonicalPath
# Getting all data stored in an etcd database
echo "" | oafp in=ch inch="(type: etcd3, options: (host: localhost, port: 2379), lib: 'etcd3.js')" out=ctable

MVS

Commands:

# Store the json result of a command into an H2 MVStore file
oaf -c "\$o(listFilesRecursive('.'),{__format:'json'})" | oafp out=ch ch="(type: mvs, options: (file: data.db))" chkey=canonicalPath
# Retrieve all keys stored in an H2 MVStore file
echo "" | oafp in=ch inch="(type: mvs, options: (file: data.db))" out=ctable

Using ElasticSearch

Health

Commands:

# Get an overview of the cluster health
curl -s "http://elastic.search:9200/_cat/health?format=json" | oafp out=ctable
# Get indices overview
curl -s "http://elastic.search:9200/_cat/indices?format=json&bytes=b" | oafp sql="select * order by index" out=ctable
# Get cluster nodes overview
curl -s "http://elastic.search:9200/_cat/nodes?format=json" | oafp sql="select * order by ip" out=ctable
# Get per host data allocation
curl -s "http://elastic.search:9200/_cat/allocation?format=json&bytes=b" | oafp sql="select * order by host" out=ctable
# Get the current cluster settings (flattened)
curl -s "http://elastic.search:9200/_cluster/settings?include_defaults=true&flat_settings=true" | oafp out=ctree
# Get the current cluster settings (non-flattened)
curl -s "http://elastic.search:9200/_cluster/settings?include_defaults=true" | oafp out=ctree
# Get stats per node
curl -s "http://127.0.0.1:9200/_nodes/stats/indices/search" | oafp out=ctree

Index related

Commands:

# Get the settings for a specific index
curl -s "http://127.0.0.1:9200/kibana_sample_data_flights/_settings" | oafp out=ctree
# Get the document count for a specific index
curl -s "http://127.0.0.1:9200/kibana_sample_data_flights/_count" | oafp