Export Virtual Server Configuration in CSV - tmsh cli script
Problem this snippet solves:

This is a simple cli script used to collect, for every virtual server in all partitions, its name, VIP details, pool name, pool members, profiles, iRules, and persistence. The script prints one comma-separated row per virtual server. You can customize the code to extract other available fields too, and the same logic can be used to pull information from profiles, stats, certificates, etc.

Update (5th Oct 2020): Added pool member capture to the code. A Pool-Members column now follows the Pool-Name column. If a pool has no members, "field not present: members" is shown in the Pool-Members column. If no pool is bound to the virtual server, the Pool-Name and Pool-Members columns both show none.
Update (21st Jan 2021): Added logic to look through multiple partitions and collect their configurations.
Update (12th Feb 2021): Added persistence to the sheet.
Update (26th May 2021): Added state and status to the sheet.
Update (24th Oct 2023): Added hostname, Pool-Status, Total-Connections, and Current-Connections.

Note: This codeshare has multiple versions; use the latest version alone. The older versions are kept so that end users can understand and compare them, which helps in modifying the script to their own requirements. Hope it helps.

How to use this snippet:

Log in to the LTM, create your script by running the command below, and paste the code provided in the snippet:

tmsh create cli script virtual-details

When you list it, it should look something like this:

[admin@labltm:Active:Standalone] ~ # tmsh list cli script virtual-details
cli script virtual-details {
    proc script::run {} {
        puts "Virtual Server,Destination,Pool-Name,Profiles,Rules"
        foreach { obj } [tmsh::get_config ltm virtual all-properties] {
            set profiles [tmsh::get_field_value $obj "profiles"]
            set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
            set profilelist [regsub -all "profiles " $remprof ""]
            puts "[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],[tmsh::get_field_value $obj "pool"],$profilelist,[tmsh::get_field_value $obj "rules"]"
        }
    }
    total-signing-status not-all-signed
}
[admin@labltm:Active:Standalone] ~ #

You can then run the script like this:

tmsh run cli script virtual-details > /var/tmp/virtual-details.csv

And get the output from the saved file:

cat /var/tmp/virtual-details.csv

Old Codes:

cli script virtual-details {
    proc script::run {} {
        puts "Virtual Server,Destination,Pool-Name,Profiles,Rules"
        foreach { obj } [tmsh::get_config ltm virtual all-properties] {
            set profiles [tmsh::get_field_value $obj "profiles"]
            set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
            set profilelist [regsub -all "profiles " $remprof ""]
            puts "[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],[tmsh::get_field_value $obj "pool"],$profilelist,[tmsh::get_field_value $obj "rules"]"
        }
    }
    total-signing-status not-all-signed
}

###===================================================
### 2.0
### UPDATED CODE BELOW
### DO NOT MIX ABOVE CODE & BELOW CODE TOGETHER
###===================================================

cli script virtual-details {
    proc script::run {} {
        puts "Virtual Server,Destination,Pool-Name,Pool-Members,Profiles,Rules"
        foreach { obj } [tmsh::get_config ltm virtual all-properties] {
            set poolname [tmsh::get_field_value $obj "pool"]
            set profiles [tmsh::get_field_value $obj "profiles"]
            set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
            set profilelist [regsub -all "profiles " $remprof ""]
            if { $poolname != "none" } {
                set poolconfig [tmsh::get_config /ltm pool $poolname]
                foreach poolinfo $poolconfig {
                    if { [catch { set member_name [tmsh::get_field_value $poolinfo "members"] } err] } {
                        set pool_member $err
                        puts "[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"]"
                    } else {
                        set pool_member ""
                        set member_name [tmsh::get_field_value $poolinfo "members"]
                        foreach member $member_name {
                            append pool_member "[lindex $member 1] "
                        }
                        puts "[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"]"
                    }
                }
            } else {
                puts "[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,none,$profilelist,[tmsh::get_field_value $obj "rules"]"
            }
        }
    }
    total-signing-status not-all-signed
}

###===================================================
### Version 3.0
### UPDATED CODE BELOW FOR MULTIPLE PARTITION
### DO NOT MIX ABOVE CODE & BELOW CODE TOGETHER
###===================================================

cli script virtual-details {
    proc script::run {} {
        puts "Partition,Virtual Server,Destination,Pool-Name,Pool-Members,Profiles,Rules"
        foreach all_partitions [tmsh::get_config auth partition] {
            set partition "[lindex [split $all_partitions " "] 2]"
            tmsh::cd /$partition
            foreach { obj } [tmsh::get_config ltm virtual all-properties] {
                set poolname [tmsh::get_field_value $obj "pool"]
                set profiles [tmsh::get_field_value $obj "profiles"]
                set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
                set profilelist [regsub -all "profiles " $remprof ""]
                if { $poolname != "none" } {
                    set poolconfig [tmsh::get_config /ltm pool $poolname]
                    foreach poolinfo $poolconfig {
                        if { [catch { set member_name [tmsh::get_field_value $poolinfo "members"] } err] } {
                            set pool_member $err
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"]"
                        } else {
                            set pool_member ""
                            set member_name [tmsh::get_field_value $poolinfo "members"]
                            foreach member $member_name {
                                append pool_member "[lindex $member 1] "
                            }
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"]"
                        }
                    }
                } else {
                    puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,none,$profilelist,[tmsh::get_field_value $obj "rules"]"
                }
            }
        }
    }
    total-signing-status not-all-signed
}
###===================================================
### Version 4.0
### UPDATED CODE BELOW FOR CAPTURING PERSISTENCE
### DO NOT MIX ABOVE CODE & BELOW CODE TOGETHER
###===================================================

cli script virtual-details {
    proc script::run {} {
        puts "Partition,Virtual Server,Destination,Pool-Name,Pool-Members,Profiles,Rules,Persist"
        foreach all_partitions [tmsh::get_config auth partition] {
            set partition "[lindex [split $all_partitions " "] 2]"
            tmsh::cd /$partition
            foreach { obj } [tmsh::get_config ltm virtual all-properties] {
                set poolname [tmsh::get_field_value $obj "pool"]
                set profiles [tmsh::get_field_value $obj "profiles"]
                set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
                set profilelist [regsub -all "profiles " $remprof ""]
                set persist [lindex [lindex [tmsh::get_field_value $obj "persist"] 0] 1]
                if { $poolname != "none" } {
                    set poolconfig [tmsh::get_config /ltm pool $poolname]
                    foreach poolinfo $poolconfig {
                        if { [catch { set member_name [tmsh::get_field_value $poolinfo "members"] } err] } {
                            set pool_member $err
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist"
                        } else {
                            set pool_member ""
                            set member_name [tmsh::get_field_value $poolinfo "members"]
                            foreach member $member_name {
                                append pool_member "[lindex $member 1] "
                            }
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist"
                        }
                    }
                } else {
                    puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,none,$profilelist,[tmsh::get_field_value $obj "rules"],$persist"
                }
            }
        }
    }
    total-signing-status not-all-signed
}

###===================================================
### 5.0
### UPDATED CODE BELOW
### DO NOT MIX ABOVE CODE & BELOW CODE TOGETHER
###===================================================

cli script virtual-details {
    proc script::run {} {
        puts "Partition,Virtual Server,Destination,Pool-Name,Pool-Members,Profiles,Rules,Persist,Status,State"
        foreach all_partitions [tmsh::get_config auth partition] {
            set partition "[lindex [split $all_partitions " "] 2]"
            tmsh::cd /$partition
            foreach { obj } [tmsh::get_config ltm virtual all-properties] {
                foreach { status } [tmsh::get_status ltm virtual [tmsh::get_name $obj]] {
                    set vipstatus [tmsh::get_field_value $status "status.availability-state"]
                    set vipstate [tmsh::get_field_value $status "status.enabled-state"]
                }
                set poolname [tmsh::get_field_value $obj "pool"]
                set profiles [tmsh::get_field_value $obj "profiles"]
                set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
                set profilelist [regsub -all "profiles " $remprof ""]
                set persist [lindex [lindex [tmsh::get_field_value $obj "persist"] 0] 1]
                if { $poolname != "none" } {
                    set poolconfig [tmsh::get_config /ltm pool $poolname]
                    foreach poolinfo $poolconfig {
                        if { [catch { set member_name [tmsh::get_field_value $poolinfo "members"] } err] } {
                            set pool_member $err
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate"
                        } else {
                            set pool_member ""
                            set member_name [tmsh::get_field_value $poolinfo "members"]
                            foreach member $member_name {
                                append pool_member "[lindex $member 1] "
                            }
                            puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate"
                        }
                    }
                } else {
                    puts "$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,none,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate"
                }
            }
        }
    }
    total-signing-status not-all-signed
}
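Since version 5.0 adds the Status and State columns, a quick health summary can be pulled straight from the generated CSV. This is only a sketch; it assumes the output path used above and that none of the field values contain embedded commas (columns 2, 9, and 10 are Virtual Server, Status, and State in this version's header):

tmsh run cli script virtual-details > /var/tmp/virtual-details.csv
awk -F',' '{ print $2","$9","$10 }' /var/tmp/virtual-details.csv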
Latest Code:

cli script virtual-details {
    proc script::run {} {
        set hostconf [tmsh::get_config /sys global-settings hostname]
        set hostname [tmsh::get_field_value [lindex $hostconf 0] hostname]
        puts "Hostname,Partition,Virtual Server,Destination,Pool-Name,Pool-Status,Pool-Members,Profiles,Rules,Persist,Status,State,Total-Conn,Current-Conn"
        foreach all_partitions [tmsh::get_config auth partition] {
            set partition "[lindex [split $all_partitions " "] 2]"
            tmsh::cd /$partition
            foreach { obj } [tmsh::get_config ltm virtual all-properties] {
                foreach { status } [tmsh::get_status ltm virtual [tmsh::get_name $obj]] {
                    set vipstatus [tmsh::get_field_value $status "status.availability-state"]
                    set vipstate [tmsh::get_field_value $status "status.enabled-state"]
                    set total_conn [tmsh::get_field_value $status "clientside.tot-conns"]
                    set curr_conn [tmsh::get_field_value $status "clientside.cur-conns"]
                }
                set poolname [tmsh::get_field_value $obj "pool"]
                set profiles [tmsh::get_field_value $obj "profiles"]
                set remprof [regsub -all {\n} [regsub -all " context" [join $profiles "\n"] "context"] " "]
                set profilelist [regsub -all "profiles " $remprof ""]
                set persist [lindex [lindex [tmsh::get_field_value $obj "persist"] 0] 1]
                if { $poolname != "none" } {
                    foreach { p_status } [tmsh::get_status ltm pool $poolname] {
                        set pool_status [tmsh::get_field_value $p_status "status.availability-state"]
                    }
                    set poolconfig [tmsh::get_config /ltm pool $poolname]
                    foreach poolinfo $poolconfig {
                        if { [catch { set member_name [tmsh::get_field_value $poolinfo "members"] } err] } {
                            set pool_member $err
                            puts "$hostname,$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_status,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate,$total_conn,$curr_conn"
                        } else {
                            set pool_member ""
                            set member_name [tmsh::get_field_value $poolinfo "members"]
                            foreach member $member_name {
                                append pool_member "[lindex $member 1] "
                            }
                            puts "$hostname,$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,$pool_status,$pool_member,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate,$total_conn,$curr_conn"
                        }
                    }
                } else {
                    puts "$hostname,$partition,[tmsh::get_name $obj],[tmsh::get_field_value $obj "destination"],$poolname,none,none,$profilelist,[tmsh::get_field_value $obj "rules"],$persist,$vipstatus,$vipstate,$total_conn,$curr_conn"
                }
            }
        }
    }
}
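The latest code adds connection counters, so the busiest virtual servers can be spotted by sorting on the Current-Conn column (column 14 in the header above). This is only a sketch; it assumes the output file path used earlier and that no field values contain embedded commas:

sort -t',' -k14 -rn /var/tmp/virtual-details.csv | head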
Tested this on version: 13.0

Export GTM/DNS Virtual Servers Configuration in CSV - tmsh cli script

Problem this snippet solves:

This is a simple cli script used to collect the name and destination of every virtual server defined under the GTM server objects. A sample of the output layout is shown after the code below.

How to use this snippet:

This is similar to my other share - https://devcentral.f5.com/s/articles/Export-GTM-DNS-Configuration-in-CSV-tmsh-cli-script

Log in to the GTM/DNS and create your script by running the command below:

tmsh create cli script gtm-vs

Delete the proc blocks so it looks something like this,

create script gtm-vs {
    ## PASTE THE CODE HERE ##
}

and paste the code provided in the snippet.

Note: When you paste it, the indentation may be realigned. This shouldn't cause any errors, but the list output will look misaligned. Feel free to delete the tab spaces in the code snippet and paste it while creating, so the indentation is aligned properly.

You can run the script like this:

tmsh run cli script gtm-vs > /var/tmp/gtm-vs-output.csv

Then get the output from the saved file, open it in Excel, and format it for audit and reporting:

cat /var/tmp/gtm-vs-output.csv

Feel free to add more elements as per your requirements.

Code :

proc script::run {} {
    puts "Server,Virtual-Server,Destination"
    foreach { obj } [tmsh::get_config gtm server] {
        set server [tmsh::get_name $obj]
        foreach { vss } [tmsh::get_config gtm server $server virtual-servers] {
            set vs_set [tmsh::get_field_value $vss virtual-servers]
            foreach vs $vs_set {
                set vs_name [tmsh::get_name $vs]
                puts $server,$vs_name,[tmsh::get_field_value $vs destination]
            }
        }
    }
}
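A sample of the output layout (the server, virtual server, and address values here are hypothetical, for illustration only):

Server,Virtual-Server,Destination
dc1-bigip-1,vs_app1,192.0.2.10:443
dc1-bigip-1,vs_app2,192.0.2.11:80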
Tested this on version: 13.1

Export GTM/DNS Configuration in CSV - tmsh cli script

Problem this snippet solves:

This is a simple cli script used to collect the WideIP, LB method, status, state, pool name, pool LB method, pool members, pool fallback, and last-resort pool information in CSV format. A sample of the output layout is shown after the code below. You can customize the code to extract other available fields too. Check out my other codeshare for the LTM report.

Note: This codeshare may get multiple versions; use the latest version alone. The older versions are kept so that end users can understand and compare them, which helps in modifying the script to their own requirements. Hope it helps.

How to use this snippet:

Log in to the GTM/DNS and create your script by running the command below:

tmsh create cli script gtm-config-parser

Delete the proc blocks so it looks something like this,

create script gtm-config-parser {
    ## PASTE THE CODE HERE ##
}

and paste the code provided in the snippet.

Note: When you paste it, the indentation may be realigned. This shouldn't cause any errors, but the list output will look misaligned. Feel free to delete the tab spaces in the code snippet and paste it while creating, so the indentation is aligned properly.

You can run the script like this:

tmsh run cli script gtm-config-parser > /var/tmp/gtm-config-parser-output.csv

Then get the output from the saved file, open it in Excel, and format it for audit and reporting:

cat /var/tmp/gtm-config-parser-output.csv

Feel free to add more elements as per your requirements.

For version 13.x and higher, a small change is required in the code; refer to the comments section. Thanks to @azblaster

Code :

proc script::run {} {
    puts "WIP,LB-MODE,WIP-STATUS,WIP-STATE,POOL-NAME,POOL-LB,POOL-MEMBERS,POOL-FB,LASTRESORT-POOL"
    foreach { obj } [tmsh::get_config gtm wideip all-properties] {
        set wipname [tmsh::get_name $obj]
        set wippools [tmsh::get_field_value $obj pools]
        set lbmode [tmsh::get_field_value $obj "pool-lb-mode"]
        set lastresort [tmsh::get_field_value $obj "last-resort-pool"]
        foreach { status } [tmsh::get_status gtm wideip $wipname] {
            set wipstatus [tmsh::get_field_value $status "status.availability-state"]
            set wipstate [tmsh::get_field_value $status "status.enabled-state"]
        }
        foreach wippool $wippools {
            set pool_name [tmsh::get_name $wippool]
            set pool_configs [tmsh::get_config /gtm pool $pool_name all-properties]
            foreach pool_config $pool_configs {
                set pool_lb [tmsh::get_field_value $pool_config "load-balancing-mode"]
                set pool_fb [tmsh::get_field_value $pool_config "fallback-mode"]
                if { [catch { set member_name [tmsh::get_field_value $pool_config "members"] } err] } {
                    set pool_member $err
                } else {
                    set pool_member ""
                    set member_name [tmsh::get_field_value $pool_config "members"]
                    foreach member $member_name {
                        append pool_member "[lindex $member 1] "
                    }
                }
                puts "$wipname,$lbmode,$wipstatus,$wipstate,$pool_name,$pool_lb,$pool_member,$pool_fb,$lastresort"
            }
        }
    }
}
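A sample of the output layout (the wide IP, pool, and virtual server names here are hypothetical, for illustration only):

WIP,LB-MODE,WIP-STATUS,WIP-STATE,POOL-NAME,POOL-LB,POOL-MEMBERS,POOL-FB,LASTRESORT-POOL
app.example.com,topology,available,enabled,pool_app,round-robin,vs_app1 vs_app2,return-to-dns,none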
Tested this on version: 11.6

Knowledge sharing: Ways to trigger and schedule scripts on the F5 BIG-IP devices

Problem this snippet solves:

The script and REST API options available for automating F5 configuration tasks.

How to use this snippet:

Code :

I think it is interesting to share the different ways scripts can be run on F5 at different times and in different states.

1. You can use a cron job, just as on any Linux device, to run a script. I have used this to restart tomcat and httpd each night with "bigstart restart" or "tmsh restart /sys service" (https://support.f5.com/csp/article/K89999342) because of a bug, until I upgrade the devices (https://support.f5.com/csp/article/K25554628). A minimal crontab sketch is shown after this list. https://support.f5.com/csp/article/K03108954

2. Newer versions of F5 also have the anacron tool, which can add some randomness to the time frame in which a script is run; many F5 default scripts use this instead of crontab: https://support.f5.com/csp/article/K33730915

3. You can even trigger scripts on the F5 device when the state changes from active to standby or from standby to active, by adding the scripts under /config/failover/. For example, if you have a bug in a critical process that causes a failover (you can use the command "show /sys ha-status all-properties" to check for this, https://support.f5.com/csp/article/K20060182) but the device does not reboot or fix the process, you can run a script that restarts the process when the device becomes standby. https://support.f5.com/csp/article/K6008

4. You can, of course, run scripts at F5 start time (startup/bootup): https://support.f5.com/csp/article/K11948

5. Another option is to run a script in the background that monitors the log and, for example, triggers a tcpdump when a specific message appears in /var/log/ltm (in some cases better than creating a rotating tcpdump to catch an issue, as per https://support.f5.com/csp/article/K65251607). The script can be a bash script with a "tail -f" command running in the background, or better, use the F5 integrated "icall" feature. Bash: https://www.thegeekstuff.com/2010/12/5-ways-to-execute-linux-command/ iCall: https://devcentral.f5.com/s/articles/what-is-icall-27404 https://devcentral.f5.com/s/articles/run-tcpdump-on-event

6. You can use the "logger -p" utility to manually generate log messages in the F5 device's log for testing your scripts; it is also used for testing custom SNMP alarm traps (for more about SNMP, see https://support.f5.com/csp/article/K3727). A short example is included after this list. https://support.f5.com/csp/article/K86480148

7. You can also trigger scripts from a BIG-IQ device, but you still can't schedule when they run: https://clouddocs.f5.com/training/community/big-iq-cloud-edition/html/class5/module1/lab6.html

8. Another option is to use Ansible or the Python SDK, which use the F5 REST API to execute commands on the F5 devices. https://f5-sdk.readthedocs.io/en/latest/
9. You can even use Expect (TCL) and bash for automation over an SSH connection, but this is a really old way of doing things: https://devcentral.f5.com/s/articles/f5-automation-tcl-amp-bash-921 https://f5-sdk.readthedocs.io/en/latest/userguide/ltm_pools_members_code_example.html

10. F5 is well integrated with Ansible, and for me it is better than the REST API Python SDK or TCL, as even the declarative AS3 interface is supported: https://clouddocs.f5.com/products/orchestration/ansible/devel/ https://www.f5.com/partners/technology-alliances/ansible Imperative: https://support.f5.com/csp/article/K42420223 https://clouddocs.f5.com/products/orchestration/ansible/devel/usage/playbook_tutorial.html Declarative: https://www.f5.com/company/blog/f5-as3-and-red-hat-ansible-automation https://clouddocs.f5.com/training/fas-ansible-workshop-101/3.0-as3-intro.html

11. For some automations without the REST API, it is better to use the native F5 cli scripts rather than bash with tmsh commands: https://clouddocs.f5.com/cli/tmsh-reference/v14/modules/cli/cli_script.html https://clouddocs.f5.com/api/tmsh/script__run.html
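As referenced in items 1 and 6 above, here are two minimal sketches. The schedule, service name, and log text are only examples; adjust them to your environment. The crontab entry (added with crontab -e as root) restarts a service nightly, and the logger command writes a test message to the BIG-IP log so you can verify that a log-watching script or SNMP trap reacts to it:

# crontab entry: restart the tomcat service every night at 02:00
0 2 * * * /usr/bin/tmsh restart /sys service tomcat

# write a test message at local0.notice (BIG-IP sends local0 messages to /var/log/ltm)
logger -p local0.notice "Test message for my custom script"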
Using a BIG-IP EAV external monitor to monitor HTTP/2 h2c servers

Problem this snippet solves:

Introduction

Beginning in BIG-IP 14.1.0, F5 provides full proxy (client and server side) support for the HTTP/2 protocol. HTTP/2 connections can run over HTTP without TLS in plaintext, or over HTTPS with TLS encryption. h2 is the protocol identifier for HTTP/2 with TLS, and h2c identifies HTTP/2 without TLS.

Note: Modern browsers today do not support unencrypted HTTP/2.

Beginning in BIG-IP 15.1.0, F5 introduces two new HTTP/2 monitors, http2 and http2_head_f5. They monitor HTTP/2 over TLS but do not monitor h2c. This article describes how you can use Extended Application Verification (EAV), or external, monitors to monitor the h2c health of your pool members and nodes.

BIG-IP Extended monitors

The built-in BIG-IP http2 and http2_head_f5 monitors perform monitoring using HTTP/2 over TLS, so h2c pool members, which serve content using HTTP/2 without TLS, will fail both monitors' health checks. Instead, you can configure external monitors to do this. External monitors let you create custom scripts, containing specific logic not available in the built-in BIG-IP monitors, to monitor the health of pool members and nodes. For a complete overview of EAV external monitors and the procedure to implement one, refer to K71282813: Overview of BIG-IP EAV external monitors.

An important component of an external monitor is the script, which runs a command such as curl or netcat that interacts with the pool member. To monitor an h2c service, beginning in BIG-IP 14.1.0, you can use the nghttp command. nghttp differs from curl in how it negotiates and establishes HTTP/2 in the following way:

Upgrade header: curl negotiates HTTP/2 by sending an Upgrade header within an HTTP/1.1 connection and switching protocols to HTTP/2. The following is an example:

# curl --http2 -Ik http://192.0.2.5
HTTP/1.1 101 Switching Protocols
Upgrade: h2c
Connection: Upgrade
HTTP/2.0 200

Direct: nghttp negotiates HTTP/2 by sending HTTP/2 frames directly to the pool member. The following example shows nghttp sending the initial HTTP/2 SETTINGS frame right after the TCP connection is established:

# nghttp -nv http://192.0.2.5
[0.000] Connected
[0.001] send SETTINGS frame <length=12, flags=0x00, stream_id=0> (niv=2)

How to use this snippet:

The external monitor script

The external monitoring script in this article uses the nghttp command as follows:

nghttp -v http://${node_ip}:${2}${URI} 2> /dev/null | grep -E -i "${RECV}" > /dev/null

The server response is piped to grep, which searches it for the ${RECV} variable. When grep succeeds, it returns exit status code 0 and the h2c service of the server is marked up.

Note: When a command in the script sends any data or output to stdout, the script exits and the external monitor marks the pool member up. For example, if you include an echo up command at the top of your script, the external monitor marks the pool member up and the rest of the code below that command does not run.

External script implementation

To implement an h2c external monitor, copy and paste the following code and follow the procedure in K71282813: Overview of BIG-IP EAV external monitors. You must define the RECV string in the Variables parameter of your BIG-IP external monitor in the Configuration utility: referring to the nghttp command described above, if RECV is left undefined, the grep command always returns exit status 0 and erroneously marks the pool member up. Optionally, define the URI variable as appropriate in your environment; for example, you can define URI as /index.html.
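Before wiring the script into an external monitor, you can run the same check by hand from the BIG-IP shell to confirm that the RECV pattern matches something in the server's response. The address, port, URI, and pattern below are only examples; an exit status of 0 from grep is what the monitor treats as up:

nghttp -v http://192.0.2.5:80/index.html 2> /dev/null | grep -E -i ":status: 200"
echo $?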
Code :

#!/bin/sh
#
# (c) Copyright 1996-2006, 2010-2013 F5 Networks, Inc.
#
# This software is confidential and may contain trade secrets that are the
# property of F5 Networks, Inc. No part of the software may be disclosed
# to other parties without the express written consent of F5 Networks, Inc.
# It is against the law to copy the software. No part of the software may
# be reproduced, transmitted, or distributed in any form or by any means,
# electronic or mechanical, including photocopying, recording, or information
# storage and retrieval systems, for any purpose without the express written
# permission of F5 Networks, Inc. Our services are only available for legal
# users of the program, for instance in the event that we extend our services
# by offering the updating of files via the Internet.
#
# @(#) $Id: //depot/maint/bigip16.0.0/tm_daemon/monitors/sample_monitor#1 $
#
#
# these arguments supplied automatically for all external pingers:
# $1 = IP (::ffff:nnn.nnn.nnn.nnn notation or hostname)
# $2 = port (decimal, host byte order)
# $3 and higher = additional arguments
#
# $MONITOR_NAME = name of the monitor
#
# In this sample script, $3 is the regular expression
#

# Name of the pidfile
pidfile="/var/run/$MONITOR_NAME.$1..$2.pid"

# Send signal to the process group to kill our former self and any children
# as external monitors are run with SIGHUP blocked
if [ -f $pidfile ]
then
   kill -9 -`cat $pidfile` > /dev/null 2>&1
fi
echo "$$" > $pidfile

# Remove the IPv6/IPv4 compatibility prefix
node_ip=`echo $1 | sed 's/::ffff://'`

# Using the nghttp utility to get data from the server.
# Search the data received for the expected expression.
nghttp -v http://${node_ip}:${2}${URI} 2> /dev/null | grep -E -i "${RECV}" > /dev/null
status=$?

if [ $status -eq 0 ]
then
    # Remove the pidfile before the script echoes anything to stdout and is killed by bigd
    rm -f $pidfile
    echo "up"
fi

# Remove the pidfile before the script ends
rm -f $pidfile
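Once the external monitor has been created from this script (per the K71282813 procedure), it can be assigned to a pool like any other monitor. The monitor and pool names here are hypothetical:

tmsh modify ltm pool h2c_pool monitor h2c_external_monitor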