Show or List F5 XC Routes in the Web
Hi F5ers, After more than two years working with F5 XC, I have decided to explore a functionality to show the host associated with each route "I have requested this functionality to F5, but it´s in design." For anyone who has deployed XC and has created routes into the load balancers, they may have encountered the fact that the routes don't have any description or relevant information, and in the case that they have to find a specific route, it could be almost impossible in an incident, or it will take a lot of time to navigate the menu. So, what I propose as an alternative solution, meanwhile, is F5 solving the request? I have designed a JavaScript that can be integrated into a bookmark "easy way", and if you copy the entire JSON configuration of the load balancer, it will show you in a console over the main XC web page the specific routes and their position in the Routes Menu. The steps to deploy it are: Create a new bookmark and copy the next encoded JavaScript in the URL New Bookmark javascript:(async()=>{const H=h=>{if(!h)return'';const i=h.invert_match?%27NOT %27:%27%27;const n=(h.name||%27%27)+%27%27;if(n.toLowerCase()===%27host%27){if(h.regex)return`${i}Host Regex: ${h.regex}`;if(h.exact)return`${i}Host: ${h.exact}`;if(h.match_value)return`${i}Host: ${h.match_value}`;if(h.value)return`${i}Host: ${h.value}`;if(Array.isArray(h.values)&&h.values.length)return`${i}Host in [${h.values.join(%27 | %27)}]`;return`${i}Host Header Present`}if(h.regex)return`${i}Header Regex: ${n} ~ ${h.regex}`;if(h.exact)return`${i}Header: ${n} = ${h.exact}`;if(h.match_value)return`${i}Header: ${n} = ${h.match_value}`;if(h.value)return`${i}Header: ${n} = ${h.value}`;if(Array.isArray(h.values)&&h.values.length)return`${i}Header: ${n} in [${h.values.join(%27 | %27)}]`;return`${i}Header: ${n} (present)`},S=t=>{try{let s=t.replace(/^\uFEFF/,%27%27).replace(/\u200B/g,%27%27);s=s.replace(/\/\*[^]*?\*\//g,%27%27);s=s.replace(/(^|[^:])\/\/.*$/gm,%27$1%27);s=s.replace(/,\s*([}\]])/g,%27$1%27);return s}catch{return t}},J=t=>{if(!t)return null;try{return JSON.parse(t)}catch{try{return JSON.parse(S(t))}catch{return null}}},G=()=>{try{return(getSelection()?.toString()||%27%27).trim()}catch{return%27%27}},D=()=>{const o=[];document.querySelectorAll(%27pre,code,textarea,div%27).forEach(el=>{const t=(el.innerText||el.textContent||%27%27).trim();if(t&&t.includes(%27"spec"%27)&&t.includes(%27"routes"%27)&&t.includes(%27"metadata"%27))o.push(t)});return o},P=a=>{for(const r of a){let t=r,i=t.indexOf(%27{%27),j=t.lastIndexOf(%27}%27);if(i>=0&&j>i)t=t.slice(i,j+1);const x=J(t);if(x?.spec?.routes)return x}return null},M=()=>{try{if(window.monaco?.editor?.getModels){for(const m of window.monaco.editor.getModels()){const txt=m.getValue?.();const j=J(txt);if(j?.spec?.routes)return j}}}catch{}return null},Q=onOk=>{const host=document.createElement(%27div%27),shadow=host.attachShadow({mode:%27open%27}),ov=document.createElement(%27div%27);ov.style.cssText=%27position:fixed;inset:0;z-index:1000000;background:rgba(0,0,0,.55);display:flex;align-items:center;justify-content:center;outline:none;%27;ov.tabIndex=0;const box=document.createElement(%27div%27);box.style.cssText=%27width:min(960px,92vw);height:min(76vh,720px);background:#111;color:#eee;border:1px solid #444;border-radius:10px;box-shadow:0 8px 24px rgba(0,0,0,.35);display:flex;flex-direction:column';const head=document.createElement('div');head.style.cssText='padding:10px 12px;border-bottom:1px solid #333;font:600 14px system-ui';head.textContent='Pega o carga el JSON del HTTP LB 
(vista JSON)';const bar=document.createElement('div');bar.style.cssText='display:flex;gap:8px;align-items:center;padding:8px 12px;border-bottom:1px solid #333';const btnRead=document.createElement('button');btnRead.textContent='📋 Leer portapapeles';btnRead.title='Requiere permiso del navegador';btnRead.style.cssText='background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:6px;padding:6px 10px;cursor:pointer';btnRead.onclick=async()=>{try{const txt=await navigator.clipboard.readText();ta.value=txt;ta.focus()}catch{alert('No se pudo leer del portapapeles. Permite el permiso o usa Archivo.')}};const file=document.createElement('input');file.type='file';file.accept='.json,.txt,application/json,text/plain';file.style.cssText='color:#bbb';file.onchange=async e=>{const f=e.target.files?.[0];if(!f)return;const txt=await f.text();ta.value=txt;ta.focus()};const tip=document.createElement('div');tip.style.cssText='margin-left:auto;color:#aaa;font-size:12px';tip.textContent='Consejo: arrastra y suelta un archivo aquí';bar.append(btnRead,file,tip);const ta=document.createElement('textarea');ta.style.cssText='flex:1;padding:10px 12px;background:#0f0f0f;color:#eee;border:0;outline:none;resize:none;font:12px/1.4 ui-monospace,Menlo,Consolas,monospace';ta.placeholder='Pega aquí el JSON (Ctrl+V). Si la página intercepta, usa "Leer portapapeles" o Archivo.';const pasteToTa=async e=>{try{let d=e.clipboardData?.getData('text/plain');if(!d&&navigator.clipboard?.readText)d=await navigator.clipboard.readText();if(typeof d==='string'){const st=ta.selectionStart??ta.value.length,en=ta.selectionEnd??ta.value.length;ta.value=ta.value.slice(0,st)+d+ta.value.slice(en);const pos=st+d.length;ta.setSelectionRange(pos,pos);ta.focus()}}catch{}};const globalPaste=e=>{e.stopImmediatePropagation?.();e.stopPropagation();e.preventDefault();pasteToTa(e)};window.addEventListener('paste',globalPaste,true);ta.addEventListener('dragover',e=>{e.preventDefault();ta.style.outline='1px dashed #555'});ta.addEventListener('dragleave',()=>ta.style.outline='');ta.addEventListener('drop',async e=>{e.preventDefault();ta.style.outline='';const f=e.dataTransfer.files?.[0];if(f)ta.value=await f.text()});const foot=document.createElement('div');foot.style.cssText='display:flex;gap:10px;justify-content:flex-end;padding:10px 12px;border-top:1px solid #333';const ok=document.createElement('button');ok.textContent='Validar y mostrar';ok.style.cssText='background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:6px;padding:6px 12px;cursor:pointer';ok.onclick=()=>{const j=J(ta.value);if(!(j?.spec?.routes)){alert('No parece un JSON válido con spec.routes.\nAsegúrate de copiar la vista JSON completa.');return}cleanup();onOk(j)};const cancel=document.createElement('button');cancel.textContent='Cancelar';cancel.style.cssText='background:#222;color:#bbb;border:1px solid #444;border-radius:6px;padding:6px 12px;cursor:pointer';const cleanup=()=>{try{window.removeEventListener('paste',globalPaste,true)}catch{}host.remove()};cancel.onclick=cleanup;foot.append(ok,cancel);box.append(head,bar,ta,foot);ov.append(box);shadow.append(ov);document.body.append(host);setTimeout(()=>ta.focus(),0);ov.addEventListener('mousedown',()=>ta.focus())},A=()=>{const s=G();let j=J(s);if(j?.spec?.routes)return Promise.resolve(j);j=M();if(j?.spec?.routes)return Promise.resolve(j);const hits=D();j=P(hits);if(j?.spec?.routes)return Promise.resolve(j);return new Promise(res=>Q(res))},R=jobj=>{const 
routes=jobj?.spec?.routes||[],id='xcHostMatchesPanel';document.getElementById(id)?.remove();const panel=document.createElement('div');panel.id=id;panel.style.cssText=['position:fixed','z-index:999999','top:12px','left:12px','max-width:560px','max-height:75vh','overflow:auto','background:#111','color:#eee','border:1px solid #444','border-radius:8px','font:13px/1.35 system-ui,Segoe UI,Roboto,Arial','padding:0','box-shadow:0 8px 24px rgba(0,0,0,.35)','cursor:grab'].join(';');const header=document.createElement('div');header.style.cssText='user-select:none;background:#1b1b1b;border-bottom:1px solid #333;border-top-left-radius:8px;border-top-right-radius:8px;padding:8px 12px;position:relative';header.innerHTML='<div style="font-weight:600">F5 XC — Host match (sin API)</div><div style="opacity:.8;font-size:12px">Fuente: selección/DOM/portapapeles/archivo</div>';const close=document.createElement('button');close.textContent='×';close.title='Cerrar';close.style.cssText='position:absolute;top:6px;right:8px;background:#333;color:#ddd;border:0;border-radius:4px;padding:2px 6px;cursor:pointer';close.addEventListener('pointerdown',e=>{e.stopPropagation();e.preventDefault()});close.addEventListener('click',e=>{e.stopPropagation();e.preventDefault();cleanup()});header.appendChild(close);panel.appendChild(header);const body=document.createElement('div');body.style.cssText='padding:10px 12px 8px';const hr=()=>{const x=document.createElement('div');x.style.cssText='height:1px;background:#333;margin:8px 0';body.appendChild(x)};if(!routes.length){body.append('Sin routes en el JSON.')}else{routes.forEach((r,i)=>{const idx=i+1,s=r.simple_route||{},rd=r.redirect_route||{};let host='';const others=[];(s.headers||[]).forEach(h=>{const t=H(h);((h.name||'').toLowerCase()==='host')?(host=host||t):others.push(t)});(rd.headers||[]).forEach(h=>{const t=H(h);((h.name||'').toLowerCase()==='host')?(host=host||t):others.push(t)});const path=s.path?(s.path.prefix?%60Path Match: ${s.path.prefix}%60:(s.path.regex?%60Path Regex: ${s.path.regex}%60:'')):(rd.path&&rd.path.prefix?%60Path Match: ${rd.path.prefix}%60:'');const type=s?'Simple Route':(rd?'Redirect Route':'(otro)');const block=document.createElement('div');block.style.marginBottom='8px';block.innerHTML=%60<div style="color:#8bd;">#${idx} — ${type}</div>%60+(host?%60<div>• ${host}</div>%60:'<div>• (sin Host)</div>')+(path?%60<div>• ${path}</div>%60:'')+(others.length?%60<div>• ${others.join('<br>• ')}</div>%60:'');body.appendChild(block);hr()})}const foot=document.createElement('div');foot.style.cssText='display:flex;gap:8px;align-items:center;justify-content:space-between';const left=document.createElement('div');left.style.cssText='display:flex;gap:8px;align-items:center';const reset=document.createElement('button');reset.textContent='Reset posición';reset.style.cssText='background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:4px;padding:4px 8px;cursor:pointer';reset.onclick=()=>{panel.style.left='12px';panel.style.top='12px';panel.style.right='auto';localStorage.removeItem('XC_PANEL_POS')};left.appendChild(reset);foot.appendChild(left);body.appendChild(foot);panel.appendChild(body);document.body.appendChild(panel);const clamp=(v,min,max)=>Math.max(min,Math.min(max,v)),restore=()=>{try{const pos=JSON.parse(localStorage.getItem('XC_PANEL_POS')||'null');if(pos&&typeof pos.left==='number'&&typeof pos.top==='number'){panel.style.left=pos.left+'px';panel.style.top=pos.top+'px';panel.style.right='auto'}}catch{}},save=()=>{try{const 
r=panel.getBoundingClientRect();localStorage.setItem('XC_PANEL_POS',JSON.stringify({left:Math.round(r.left),top:Math.round(r.top)}))}catch{}};restore();let drag=false,sx=0,sy=0,sl=0,st=0;function onKey(e){if(e.key==='Escape')cleanup()}function cleanup(){try{window.removeEventListener('keydown',onKey)}catch{}panel.remove()}panel.addEventListener('pointerdown',e=>{if(e.button!==0)return;if(e.target.closest("button, a, input, textarea, select, [draggable='true']"))return;drag=true;panel.setPointerCapture(e.pointerId);sx=e.clientX;sy=e.clientY;const r=panel.getBoundingClientRect();sl=r.left;st=r.top;panel.style.willChange='left, top';panel.style.transition='none';panel.style.cursor='grabbing'});panel.addEventListener('pointermove',e=>{if(!drag)return;const dx=e.clientX-sx,dy=e.clientY-sy,w=panel.offsetWidth,h=panel.offsetHeight,maxL=innerWidth-w-6,maxT=innerHeight-h-6,newL=clamp(sl+dx,6,Math.max(6,maxL)),newT=clamp(st+dy,6,Math.max(6,maxT));panel.style.left=newL+'px';panel.style.top=newT+'px';panel.style.right='auto'});panel.addEventListener('pointerup',e=>{if(!drag)return;drag=false;panel.releasePointerCapture(e.pointerId);panel.style.willChange='';panel.style.cursor='grab';save()});window.addEventListener('resize',()=>{save();restore()});window.addEventListener('keydown',onKey)};try{const json=await A();R(json)}catch(e){console.error(e);alert('No fue posible obtener el JSON. Abre la vista JSON del LB o usa el cuadro para pegar/cargar.')}})(); If you want to explore the JavaScript code, I will leave it at the end of the publication. How does it work? Copy or upload the JSON code of the load balancer In the XC web menu, execute the bookmark and copy the JSON code, and then click on validate and show. It shows you the specific routes and number position for each route, giving the possibility to find the required route easily and quickly. Hope it works for anyone who has the same problem as me. The JavaScript code is: (async () => { /** * F5 XC Host Match Viewer (sin API) — blindado contra listeners externos * - Fuentes: Selección | Monaco | DOM | Cuadro (Pegar / Portapapeles / Archivo) * - Intercepción GLOBAL de 'paste' (captura) mientras el cuadro está abierto: * redirige el contenido al <textarea> propio y corta la propagación/defecto. * - Panel arrastrable, ESC/× para cerrar, posición persistente. */ // ---------- Utils ---------- const formatHeader = (h) => { if (!h) return ''; const inv = h.invert_match ? 'NOT ' : ''; const name = (h.name || '').toString(); if (name.toLowerCase() === 'host') { if (h.regex) return `${inv}Host Regex: ${h.regex}`; if (h.exact) return `${inv}Host: ${h.exact}`; if (h.match_value) return `${inv}Host: ${h.match_value}`; if (h.value) return `${inv}Host: ${h.value}`; if (Array.isArray(h.values) && h.values.length) { return `${inv}Host in [${h.values.join(' | ')}]`; } return `${inv}Host Header Present`; } if (h.regex) return `${inv}Header Regex: ${name} ~ ${h.regex}`; if (h.exact) return `${inv}Header: ${name} = ${h.exact}`; if (h.match_value) return `${inv}Header: ${name} = ${h.match_value}`; if (h.value) return `${inv}Header: ${name} = ${h.value}`; if (Array.isArray(h.values) && h.values.length) { return `${inv}Header: ${name} in [${h.values.join(' | ')}]`; } return `${inv}Header: ${name} (present)`; }; const sanitizeJson = (text) => { try { let s = text.replace(/^\uFEFF/, '').replace(/\u200B/g, ''); s = s.replace(/\/\*[^]*?\*\//g, ''); // /* ... */ s = s.replace(/(^|[^:])\/\/.*$/gm, '$1'); // // ... 
(evita http://) s = s.replace(/,\s*([}\]])/g, '$1'); // comas colgantes return s; } catch { return text; } }; const tryParseJson = (text) => { if (!text) return null; try { return JSON.parse(text); } catch { try { return JSON.parse(sanitizeJson(text)); } catch { return null; } } }; const getSelectionText = () => { try { return (window.getSelection()?.toString() || '').trim(); } catch { return ''; } }; const findDomCandidates = () => { const out = []; document.querySelectorAll('pre,code,textarea,div').forEach(el => { const t = (el.innerText || el.textContent || '').trim(); if (t && t.includes('"spec"') && t.includes('"routes"') && t.includes('"metadata"')) out.push(t); }); return out; }; const parseFirstJson = (texts) => { for (const raw of texts) { let t = raw; const i = t.indexOf('{'), j = t.lastIndexOf('}'); if (i >= 0 && j > i) t = t.slice(i, j + 1); const jn = tryParseJson(t); if (jn?.spec?.routes) return jn; } return null; }; const tryMonacoModels = () => { try { if (window.monaco?.editor?.getModels) { for (const m of window.monaco.editor.getModels()) { const txt = m.getValue?.(); const j = tryParseJson(txt); if (j?.spec?.routes) return j; } } } catch {} return null; }; // ---------- Cuadro Pegar/Archivo con Shadow DOM + PASTE GLOBAL ---------- let modalState = { open: false, ta: null, host: null, removeGlobal: null }; const showPasteOrFileModal = (onOk) => { // Shadow host para aislar el cuadro const host = document.createElement('div'); const shadow = host.attachShadow({ mode: 'open' }); // Overlay clicable (lleva el foco al textarea) const ov = document.createElement('div'); ov.style.cssText = 'position:fixed;inset:0;z-index:1000000;background:rgba(0,0,0,.55);display:flex;align-items:center;justify-content:center;outline:none;'; ov.tabIndex = 0; // para recibir foco ov.addEventListener('mousedown', () => ta?.focus()); const box = document.createElement('div'); box.style.cssText = 'width:min(960px,92vw);height:min(76vh,720px);background:#111;color:#eee;border:1px solid #444;border-radius:10px;' + 'box-shadow:0 8px 24px rgba(0,0,0,.35);display:flex;flex-direction:column'; const head = document.createElement('div'); head.style.cssText = 'padding:10px 12px;border-bottom:1px solid #333;font:600 14px system-ui'; head.textContent = 'Pega o carga el JSON del HTTP LB (vista JSON)'; const bar = document.createElement('div'); bar.style.cssText = 'display:flex;gap:8px;align-items:center;padding:8px 12px;border-bottom:1px solid #333'; const btnRead = document.createElement('button'); btnRead.textContent = '📋 Leer portapapeles'; btnRead.title = 'Requiere permiso del navegador'; btnRead.style.cssText = 'background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:6px;padding:6px 10px;cursor:pointer'; btnRead.onclick = async () => { try { const txt = await navigator.clipboard.readText(); ta.value = txt; ta.focus(); } catch { alert('No se pudo leer del portapapeles. 
Permite el permiso o usa Archivo.'); } }; const file = document.createElement('input'); file.type = 'file'; file.accept = '.json,.txt,application/json,text/plain'; file.style.cssText = 'color:#bbb'; file.onchange = async (e) => { const f = e.target.files?.[0]; if (!f) return; const txt = await f.text(); ta.value = txt; ta.focus(); }; const tip = document.createElement('div'); tip.style.cssText = 'margin-left:auto;color:#aaa;font-size:12px'; tip.textContent = 'Consejo: arrastra y suelta un archivo aquí'; bar.append(btnRead, file, tip); const ta = document.createElement('textarea'); ta.style.cssText = 'flex:1;padding:10px 12px;background:#0f0f0f;color:#eee;border:0;outline:none;resize:none;font:12px/1.4 ui-monospace,Menlo,Consolas,monospace'; ta.placeholder = 'Pega aquí el JSON (Ctrl+V). Si la página intercepta, usa "Leer portapapeles" o Archivo.'; // Pegar “blindado” en el <textarea> const pasteToTa = async (e) => { try { let data = e.clipboardData?.getData('text/plain'); if (!data && navigator.clipboard?.readText) { // Fallback si el navegador no expone clipboardData al evento data = await navigator.clipboard.readText(); } if (typeof data === 'string') { const start = ta.selectionStart ?? ta.value.length; const end = ta.selectionEnd ?? ta.value.length; ta.value = ta.value.slice(0, start) + data + ta.value.slice(end); const pos = start + data.length; ta.setSelectionRange(pos, pos); ta.focus(); } } catch {} }; // Interceptor GLOBAL (captura) — redirige SIEMPRE el paste al <textarea> const globalPasteCapture = (e) => { if (!modalState.open) return; e.stopImmediatePropagation?.(); e.stopPropagation(); e.preventDefault(); pasteToTa(e); }; window.addEventListener('paste', globalPasteCapture, true); // Drag&drop de archivo al <textarea> ta.addEventListener('dragover', e => { e.preventDefault(); ta.style.outline = '1px dashed #555'; }); ta.addEventListener('dragleave', () => { ta.style.outline = ''; }); ta.addEventListener('drop', async e => { e.preventDefault(); ta.style.outline = ''; const f = e.dataTransfer.files?.[0]; if (f) ta.value = await f.text(); }); const foot = document.createElement('div'); foot.style.cssText = 'display:flex;gap:10px;justify-content:flex-end;padding:10px 12px;border-top:1px solid #333'; const ok = document.createElement('button'); ok.textContent = 'Validar y mostrar'; ok.style.cssText = 'background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:6px;padding:6px 12px;cursor:pointer'; ok.onclick = () => { const j = tryParseJson(ta.value); if (!(j?.spec?.routes)) { alert('No parece un JSON válido con spec.routes.\nAsegúrate de copiar la vista JSON completa.'); return; } cleanup(); onOk(j); }; const cancel = document.createElement('button'); cancel.textContent = 'Cancelar'; cancel.style.cssText = 'background:#222;color:#bbb;border:1px solid #444;border-radius:6px;padding:6px 12px;cursor:pointer'; const cleanup = () => { try { window.removeEventListener('paste', globalPasteCapture, true); } catch {} modalState = { open: false, ta: null, host: null, removeGlobal: null }; host.remove(); }; cancel.onclick = cleanup; foot.append(ok, cancel); box.append(head, bar, ta, foot); ov.append(box); shadow.append(ov); document.body.append(host); // Estado global del modal modalState = { open: true, ta, host, removeGlobal: () => window.removeEventListener('paste', globalPasteCapture, true) }; // Foco inicial y al pulsar en overlay setTimeout(() => { ta.focus(); }, 0); ov.addEventListener('click', (ev) => { // Si clic fuera de controles, mueve foco al textarea if (ev.target === ov) 
ta.focus(); }); }; // ---------- Flujo de adquisición ---------- const acquireJson = () => { const sel = getSelectionText(); let j = tryParseJson(sel); if (j?.spec?.routes) return Promise.resolve(j); j = tryMonacoModels(); if (j?.spec?.routes) return Promise.resolve(j); const hits = findDomCandidates(); j = parseFirstJson(hits); if (j?.spec?.routes) return Promise.resolve(j); return new Promise(res => showPasteOrFileModal(res)); }; // ---------- Panel ---------- const drawPanel = (jobj) => { const routes = jobj?.spec?.routes || []; const id = 'xcHostMatchesPanel'; document.getElementById(id)?.remove(); const panel = document.createElement('div'); panel.id = id; panel.style.cssText = [ 'position:fixed','z-index:999999','top:12px','left:12px', 'max-width:560px','max-height:75vh','overflow:auto', 'background:#111','color:#eee','border:1px solid #444','border-radius:8px', 'font:13px/1.35 system-ui,Segoe UI,Roboto,Arial','padding:0', 'box-shadow:0 8px 24px rgba(0,0,0,.35)','cursor:grab' ].join(';'); const header = document.createElement('div'); header.style.cssText = 'user-select:none;background:#1b1b1b;border-bottom:1px solid #333;border-top-left-radius:8px;border-top-right-radius:8px;padding:8px 12px;position:relative'; header.innerHTML = ` <div style="font-weight:600">F5 XC — Host match (sin API)</div> <div style="opacity:.8;font-size:12px">Fuente: selección/DOM/portapapeles/archivo</div> `; const close = document.createElement('button'); close.textContent = '×'; close.title = 'Cerrar'; close.style.cssText = 'position:absolute;top:6px;right:8px;background:#333;color:#ddd;border:0;border-radius:4px;padding:2px 6px;cursor:pointer'; close.addEventListener('pointerdown', (e) => { e.stopPropagation(); e.preventDefault(); }); close.addEventListener('click', (e) => { e.stopPropagation(); e.preventDefault(); cleanup(); }); header.appendChild(close); panel.appendChild(header); const body = document.createElement('div'); body.style.cssText = 'padding:10px 12px 8px'; const hr = () => { const x = document.createElement('div'); x.style.cssText = 'height:1px;background:#333;margin:8px 0'; body.appendChild(x); }; if (!routes.length) { body.append('Sin routes en el JSON.'); } else { routes.forEach((r, i) => { const idx = i + 1; const s = r.simple_route || {}; const rd = r.redirect_route || {}; let hostLine = ''; const others = []; (s.headers || []).forEach(h => { const t = formatHeader(h); ((h.name || '').toLowerCase() === 'host') ? (hostLine = hostLine || t) : others.push(t); }); (rd.headers || []).forEach(h => { const t = formatHeader(h); ((h.name || '').toLowerCase() === 'host') ? (hostLine = hostLine || t) : others.push(t); }); const path = s.path ? (s.path.prefix ? `Path Match: ${s.path.prefix}` : (s.path.regex ? `Path Regex: ${s.path.regex}` : '')) : (rd.path && rd.path.prefix ? `Path Match: ${rd.path.prefix}` : ''); const type = s ? 'Simple Route' : (rd ? 'Redirect Route' : '(otro)'); const block = document.createElement('div'); block.style.marginBottom = '8px'; block.innerHTML = `<div style="color:#8bd;">#${idx} — ${type}</div>` + (hostLine ? `<div>• ${hostLine}</div>` : '<div>• (sin Host)</div>') + (path ? `<div>• ${path}</div>` : '') + (others.length ? 
`<div>• ${others.join('<br>• ')}</div>` : ''); body.appendChild(block); hr(); }); } const foot = document.createElement('div'); foot.style.cssText = 'display:flex;gap:8px;align-items:center;justify-content:space-between'; const left = document.createElement('div'); left.style.cssText = 'display:flex;gap:8px;align-items:center'; const reset = document.createElement('button'); reset.textContent = 'Reset posición'; reset.style.cssText = 'background:#2b2b2b;color:#ddd;border:1px solid #444;border-radius:4px;padding:4px 8px;cursor:pointer'; reset.onclick = () => { panel.style.left = '12px'; panel.style.top = '12px'; panel.style.right = 'auto'; localStorage.removeItem('XC_PANEL_POS'); }; left.appendChild(reset); foot.appendChild(left); body.appendChild(foot); panel.appendChild(body); document.body.appendChild(panel); // ---- Drag & persistencia ---- const clamp = (v, min, max) => Math.max(min, Math.min(max, v)); const restore = () => { try { const pos = JSON.parse(localStorage.getItem('XC_PANEL_POS') || 'null'); if (pos && typeof pos.left === 'number' && typeof pos.top === 'number') { panel.style.left = pos.left + 'px'; panel.style.top = pos.top + 'px'; panel.style.right = 'auto'; } } catch {} }; const save = () => { try { const r = panel.getBoundingClientRect(); localStorage.setItem('XC_PANEL_POS', JSON.stringify({ left: Math.round(r.left), top : Math.round(r.top), })); } catch {} }; restore(); let dragging = false, sx = 0, sy = 0, sl = 0, st = 0; function onKey(ev) { if (ev.key === 'Escape') cleanup(); } window.addEventListener('keydown', onKey); function cleanup() { try { window.removeEventListener('keydown', onKey); } catch {} panel.remove(); } panel.addEventListener('pointerdown', (e) => { if (e.button !== 0) return; if (e.target.closest("button, a, input, textarea, select, [draggable='true']")) return; dragging = true; panel.setPointerCapture(e.pointerId); sx = e.clientX; sy = e.clientY; const r = panel.getBoundingClientRect(); sl = r.left; st = r.top; panel.style.willChange = 'left, top'; panel.style.transition = 'none'; panel.style.cursor = 'grabbing'; }); panel.addEventListener('pointermove', (e) => { if (!dragging) return; const dx = e.clientX - sx; const dy = e.clientY - sy; const w = panel.offsetWidth; const h = panel.offsetHeight; const maxLeft = innerWidth - w - 6; const maxTop = innerHeight - h - 6; const newLeft = clamp(sl + dx, 6, Math.max(6, maxLeft)); const newTop = clamp(st + dy, 6, Math.max(6, maxTop)); panel.style.left = newLeft + 'px'; panel.style.top = newTop + 'px'; panel.style.right = 'auto'; }); panel.addEventListener('pointerup', (e) => { if (!dragging) return; dragging = false; panel.releasePointerCapture(e.pointerId); panel.style.willChange = ''; panel.style.cursor = 'grab'; save(); }); window.addEventListener('resize', () => { save(); restore(); }); }; // ---------- Ejecuta ---------- try { const json = await (async () => { const sel = getSelectionText(); let j = tryParseJson(sel); if (j?.spec?.routes) return j; j = tryMonacoModels(); if (j?.spec?.routes) return j; const hits = findDomCandidates(); j = parseFirstJson(hits); if (j?.spec?.routes) return j; return await new Promise(res => showPasteOrFileModal(res)); })(); drawPanel(json); } catch (e) { console.error(e); alert('No fue posible obtener el JSON. Abre la vista JSON del LB o usa el cuadro para pegar/cargar.'); } })();8Views1like1CommentGetting Started with the Certified F5 NGINX Gateway Fabric Operator on Red Hat OpenShift
As enterprises modernize their Kubernetes strategies, the shift from standard Ingress Controllers to the Kubernetes Gateway API is redefining how we manage traffic. For years, the F5 NGINX Ingress Controller has been a foundational component in OpenShift environments. With the certification of F5 NGINX Gateway Fabric (NGF) 2.2 for Red Hat OpenShift, that legacy enters its next chapter. This new certified operator brings the high-performance NGINX data plane into the standardized, role-oriented Gateway API model—with full integration into the OpenShift Operator Lifecycle Manager (OLM). Whether you're a platform engineer managing cluster ingress or a developer routing traffic to microservices, NGF on OpenShift 4.19+ delivers a unified, secure, and fully supported traffic fabric. In this guide, we walk through installing the operator, configuring the NginxGatewayFabric resource, and addressing OpenShift-specific networking patterns such as NodePort + Route.

Why NGINX Gateway Fabric on OpenShift?

While Red Hat OpenShift 4.19+ includes native support for the Gateway API (v1.2.1), integrating NGF adds critical enterprise capabilities:

✔ Certified & OpenShift-Ready: The operator is fully validated by Red Hat, ensuring UBI-compliant images and compatibility with OpenShift's strict Security Context Constraints (SCCs).
✔ High Performance, Low Complexity: NGF delivers the core benefits long associated with NGINX—efficiency, simplicity, and predictable performance.
✔ Advanced Traffic Capabilities: Features such as regular-expression path matching and support for ExternalName services allow for complex, hybrid-cloud traffic patterns.
✔ AI/ML Readiness: NGF 2.2 supports the Gateway API Inference Extension, enabling inference-aware routing for GenAI and LLM workloads on platforms like Red Hat OpenShift AI.

Prerequisites

Before we begin, ensure you have:
- Cluster Administrator access to an OpenShift cluster (version 4.19 or later is recommended for Gateway API GA support).
- Access to the OpenShift Console and the oc CLI.
- The ability to pull images from ghcr.io or your internal mirror.

Step 1: Installing the Operator from OperatorHub

We leverage the Operator Lifecycle Manager (OLM) for a "point-and-click" installation that handles lifecycle management and upgrades.
1. Log into the OpenShift Web Console as an administrator.
2. Navigate to Operators > OperatorHub.
3. Search for NGINX Gateway Fabric in the search box.
4. Select the NGINX Gateway Fabric Operator card and click Install.
5. Accept the default installation mode (All namespaces) or select a specific namespace (e.g., nginx-gateway), and click Install.
6. Wait until the status shows Succeeded.

Once installed, the operator will manage the NGF lifecycle automatically.
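If you prefer to confirm the install from the CLI, the operator's ClusterServiceVersion should report a Succeeded phase. A minimal check, assuming the default all-namespaces install (the namespace will differ if you picked a specific one):

# List ClusterServiceVersions and look for the NGF operator in the Succeeded phase.
# openshift-operators is an assumption for the all-namespaces install mode; adjust as needed.
oc get csv -n openshift-operators | grep -i nginx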
Step 2: Configuring the NginxGatewayFabric Resource

Unlike the Ingress Controller, which used NginxIngressController resources, NGF uses the NginxGatewayFabric Custom Resource (CR) to configure the control plane and data plane.
1. In the Console, go to Installed Operators > NGINX Gateway Fabric Operator.
2. Click the NginxGatewayFabric tab and select Create NginxGatewayFabric.
3. Select YAML view to configure the deployment specifics.

Step 3: Exposing the NGF Data Plane

NGF uses a Kubernetes Service to expose its data plane. Before the data plane launches, we must tell the controller how to expose it.

Option A - LoadBalancer (ROSA, ARO, Managed OpenShift)

By default, the NGINX Gateway Fabric Operator configures the service type as LoadBalancer. On public cloud managed OpenShift services (such as ROSA on AWS or ARO on Azure), this native default works out of the box to provision a cloud load balancer. No additional steps are required.

Option B - NodePort with OpenShift Route (On-Prem/Hybrid)

However, for on-premises or bare-metal OpenShift clusters lacking a native LoadBalancer implementation, the common pattern is to use a NodePort service exposed via an OpenShift Route.

Update the NGF CR to use NodePort:
1. In the Console, go to Installed Operators > NGINX Gateway Fabric Operator.
2. Click the NginxGatewayFabric tab and select NginxGatewayFabric.
3. Select YAML view to directly edit the configuration specifics.
4. Change spec.nginx.service.type to NodePort:

apiVersion: gateway.nginx.org/v1alpha1
kind: NginxGatewayFabric
metadata:
  name: default
  namespace: nginx-gateway
spec:
  nginx:
    service:
      type: NodePort

Create the OpenShift Route. After applying the CR, create a Route to expose the NGINX Service:

oc create route edge ngf \
  --service=nginxgatewayfabric-sample-nginx-gateway-fabric \
  --port=http \
  -n nginx-gateway

Note: This creates an edge TLS termination route. For passthrough TLS (allowing NGINX to handle certificates), use --passthrough and target the https port.

Step 4: Validating the Deployment

Verify that the operator has deployed the control plane pods successfully:

oc get pod -n nginx-gateway
NAME                                                              READY   STATUS    RESTARTS   AGE
nginx-gateway-fabric-controller-manager-dd6586597-bfdl5          1/1     Running   0          23m
nginxgatewayfabric-sample-nginx-gateway-fabric-564cc6df4d-hztm8  1/1     Running   0          18m

oc get gatewayclass
NAME    CONTROLLER                                   ACCEPTED   AGE
nginx   gateway.nginx.org/nginx-gateway-controller   True       4d1h

You should also see a GatewayClass named nginx. This indicates the controller is ready to manage Gateway resources.

Step 5: Functional Check with Gateway API

To test traffic, we will use the standard Gateway API resources (Gateway and HTTPRoute).

Deploy a Test Application (Cafe Service)

Ensure you have a backend service running. You can use a simple service for validation; a minimal example is sketched below.
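The guide does not ship a test-app manifest, so here is a minimal sketch of a "coffee" backend matching the HTTPRoute that follows (Service named coffee, port 80). The image choice is an assumption: nginxinc/nginx-unprivileged is used because it listens on 8080 and runs as a non-root user under OpenShift's restricted SCC, but any HTTP backend behind a Service named coffee on port 80 will work.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: coffee
spec:
  replicas: 1
  selector:
    matchLabels:
      app: coffee
  template:
    metadata:
      labels:
        app: coffee
    spec:
      containers:
      - name: coffee
        image: nginxinc/nginx-unprivileged:stable   # placeholder backend; swap in your own app image
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: coffee
spec:
  selector:
    app: coffee
  ports:
  - name: http
    port: 80          # matches the HTTPRoute backendRef port below
    targetPort: 8080  # container listen port for the unprivileged image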
Create a Gateway

This resource opens the listener on the NGINX data plane.

apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: cafe
spec:
  gatewayClassName: nginx
  listeners:
  - name: http
    port: 80
    protocol: HTTP

Create an HTTPRoute

This binds the traffic to your backend service.

apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: coffee
spec:
  parentRefs:
  - name: cafe
  hostnames:
  - "cafe.example.com"
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /
    backendRefs:
    - name: coffee
      port: 80

Test Connectivity

If you used Option B (Route), send a request to your OpenShift Route hostname. If you used Option A, send it to the LoadBalancer IP.

OpenShift 4.19 Compatibility

It is also vital to understand the "under the hood" constraints of OpenShift 4.19:

Gateway API Version Pinning: OpenShift 4.19 ships with Gateway API CRDs pinned to v1.2.1. While NGF 2.2 supports v1.3.0 features, it has been conformance-tested against v1.2.1 to ensure stability within OpenShift's version-locked environment.

oc get crd gateways.gateway.networking.k8s.io -o yaml | grep "gateway.networking.k8s.io/"
    gateway.networking.k8s.io/bundle-version: v1.2.1
    gateway.networking.k8s.io/channel: standard

Looking ahead, however, future NGINX Gateway Fabric releases may rely on newer Gateway API specifications that are not natively supported by the pinned CRDs in OpenShift 4.19. If you anticipate running a newer NGF version that may not be compatible with the current OpenShift Gateway API version, please reach out to us to discuss your compatibility requirements.

Security Context Constraints (SCC): In previous manual deployments, you might have wrestled with NET_BIND_SERVICE capabilities or creating custom SCCs. The certified operator handles these permissions automatically, using UBI-based images that comply with Red Hat's security standards out of the box.

Next Steps: AI Inference

With NGF running, you are ready for advanced use cases. Explore the Gateway API Inference Extension to route traffic to LLMs efficiently, optimizing GPU usage on Red Hat OpenShift AI. The certified NGINX Gateway Fabric Operator simplifies the operational burden, letting you focus on what matters: delivering secure, high-performance applications and AI workloads.

References:
- NGINX Gateway Fabric Operator on Red Hat Catalog
- F5 NGINX Gateway Fabric Certified for Red Hat OpenShift
- NGINX Gateway Fabric Installation Docs
VIP is not responding on SYN after enabling other modules like ASM, APM and AFM.

Hi all,

I have an F5 VE running 17.5.1.3 in my lab environment for learning purposes. As the back end I installed the phpauction web page, and the configuration works flawlessly when only the LTM module is enabled, in the most simple form:
- Virtual server on port 80
- TCP profile
- HTTP profile
- Pool
- Automap

When I add another module, for example ASM, the VIP stops working even though it is still green/up and not even a security policy has been attached to it. Captures show that the SYN is reaching the F5, but I do not get a response from it:

tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on any, link-type EN10MB (Ethernet), capture size 65535 bytes
16:24:51.691462 IP 192.168.1.100.64282 > 192.168.2.10.80: Flags [S], seq 5173934, win 65535, options [mss 1260,nop,wscale 8,nop,nop,sackOK], length 0 in slot1/tmm1 lis= port=1.1 trunk=
16:24:51.942738 IP 192.168.1.100.64625 > 192.168.2.10.80: Flags [S], seq 1642892817, win 65535, options [mss 1260,nop,wscale 8,nop,nop,sackOK], length 0 in slot1/tmm0 lis= port=1.1 trunk=

I checked the back-end connection as well, but the F5 is not sending out the SYN to the web server, so it looks like it is blackholing my traffic. When I disable ASM and use only LTM, everything starts to work again. The same happens with other modules such as APM or AFM: the VIP stops responding after enabling only that module.

I tried the following:
- Factory reset the machine.
- Upgrade to 17.5.1.3.
- Enabled RST cause (but there isn't any, because the SYN isn't there in the first place).
- Force reload config on the mcpd process.
- Enabled LTM debugging without receiving any logs about the connection.
- Looked into the DoS and bot defense logs to see if traffic is dropped at an earlier point in the chain.
- Enabled tmm debug without getting any relevant logs.
- Changed the VIP from Standard to FastL4.
- Removed the HTTP profile.

I did play around a lot with other modules as well (ASM, APM, AFM, SSLO, DNS), which is why I thought it was a configuration issue at first, but setting the machine back to factory default did not solve it. Is it possible there are some leftovers from my learning path on this machine? Do you know what additional steps I can take to solve this issue?

Thanks.

Best regards,
Mitchel
Restful API call takes too long

Hi everyone,

I encountered a problem where API calls take too long when using the RESTful API; the entire call takes more than forty seconds, as shown in the figure below. All of the APIs have this problem: the call runs successfully, but the TTFB portion exceeds 40 s. However, after I added a default route, the problem disappeared. I tried testing in another environment and found that the API calls worked fine there regardless of whether a default route was configured, but I don't know which configuration in the original environment might have caused this. Does F5 have any configuration that triggers internet access when calling a RESTful API? Can anyone help me explain this problem?

Best regards,
ecolauce
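For readers who want to reproduce the measurement, curl can report time-to-first-byte directly against an iControl REST endpoint. This is only an illustration; the management address and credentials are placeholders:

# Measure TTFB for a simple iControl REST call (replace the address and credentials).
curl -sk -u admin:admin -o /dev/null \
  -w "connect: %{time_connect}s  ttfb: %{time_starttransfer}s  total: %{time_total}s\n" \
  https://<bigip-mgmt-ip>/mgmt/tm/ltm/virtual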
About f5 access scheme additional functionality

Regarding the F5 Access URL scheme for F5 APM: I am currently implementing this feature, referring to the documentation here: https://techdocs.f5.com/en-us/apm-f5-access/apm-f5-access-ios-3-0-1/c_edge_client_chapter_title_addl_config_info/c_f5_access_starting_from_url.html

The documentation states: "password: A parameter used to specify the password with which to start the connection. When the password parameter is specified, it is used as a one-time password and not saved in the configuration."

I have reviewed this, but I am looking for a way to save the password when the scheme is invoked in the F5 Access app. I would appreciate any help from those with relevant experience. Thank you.
BIG-IP Next Edge Firewall CNF for Edge workloads

Introduction

The CNF architecture aligns with cloud-native principles by enabling horizontal scaling, ensuring that applications can expand seamlessly without compromising performance. It preserves the deterministic reliability essential for telecom environments, balancing scalability with the stringent demands of real-time processing. More background on the value CNFs bring to the environment is available here: https://community.f5.com/kb/technicalarticles/from-virtual-to-cloud-native-infrastructure-evolution/342364

Telecom service providers use CNFs to:
- Enable efficient and secure processing of N6-LAN traffic at the edge to meet the stringent requirements of 5G networks.
- Optimize AI-RAN deployments with dynamic scaling and enhanced security, ensuring that AI workloads are processed efficiently and securely at the edge, improving overall network performance.
- Deploy advanced AI applications at the edge with the confidence of carrier-grade security and traffic management, ensuring real-time processing and analytics for a variety of edge use cases.

CNF Firewall Implementation Overview

Let's start by understanding how the different CRs are enabled within a CNF implementation; this allows the CNF to achieve more optimized performance, CapEx, and OpEx. The traditional way of inserting services into Kubernetes is shown below. Moving to a consolidated data-plane approach saved 60% of the Kubernetes environment's performance.

The F5BigFwPolicy Custom Resource (CR) applies industry-standard firewall rules to the Traffic Management Microkernel (TMM), ensuring that only connections initiated by trusted clients will be accepted. When a new F5BigFwPolicy CR configuration is applied, the firewall rules are first sent to the Application Firewall Management (AFM) Pod, where they are compiled into Binary Large Objects (BLOBs) to enhance processing performance. Once the firewall BLOB is compiled, it is sent to the TMM Proxy Pod, which begins inspecting and filtering network packets based on the defined rules.

Enabling AFM within BIG-IP Controller

Let's explore how we can enable and configure the CNF firewall. Below is an overview of the steps needed to set up the environment, up to the CNF CR installation.

[Enabling the AFM] Enable the AFM CR within the BIG-IP Controller definition:

global:
  afm:
    enabled: true
  pccd:
    enabled: true
f5-afm:
  enabled: true
cert-orchestrator:
  enabled: true
afm:
  pccd:
    enabled: true
    image:
      repository: "local.registry.com"

[Configuration] Example firewall policy settings:

apiVersion: "k8s.f5net.com/v1"
kind: F5BigFwPolicy
metadata:
  name: "cnf-fw-policy"
  namespace: "cnf-gateway"
spec:
  rule:
    - name: allow-10-20-http
      action: "accept"
      logging: true
      servicePolicy: "service-policy1"
      ipProtocol: tcp
      source:
        addresses:
          - "2002::10:20:0:0/96"
        zones:
          - "zone1"
          - "zone2"
      destination:
        ports:
          - "80"
        zones:
          - "zone3"
          - "zone4"
    - name: allow-10-30-ftp
      action: "accept"
      logging: true
      ipProtocol: tcp
      source:
        addresses:
          - "2002::10:30:0:0/96"
        zones:
          - "zone1"
          - "zone2"
      destination:
        ports:
          - "20"
          - "21"
        zones:
          - "zone3"
          - "zone4"
    - name: allow-us-traffic
      action: "accept"
      logging: true
      source:
        geos:
          - "US:California"
      destination:
        geos:
          - "MX:Baja California"
          - "MX:Chihuahua"
    - name: drop-all
      action: "drop"
      logging: true
      ipProtocol: any
      source:
        addresses:
          - "::0/0"
          - "0.0.0.0/0"
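Assuming the policy above is saved to a file, applying and checking it follows the usual CR workflow. The resource name used with kubectl get is an assumption derived from the CR kind; confirm the exact name your cluster reports before relying on it:

# Apply the firewall policy CR and confirm the API server accepted it.
kubectl apply -f cnf-fw-policy.yaml

# Discover the exact resource name for the CR kind, then list it.
kubectl api-resources | grep -i fwpolicy
kubectl get f5bigfwpolicy -n cnf-gateway   # assumed resource name; verify with the command above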
apiVersion: "k8s.f5net.com/v1" kind: F5BigLogProfile metadata: name: "cnf-log-profile" namespace: "cnf-gateway" spec: name: "cnf-logs" firewall: enabled: true network: publisher: "cnf-hsl-pub" events: aclMatchAccept: true aclMatchDrop: true tcpEvents: true translationFields: true Verifying the CNF firewall settings can be done through the sidecar container kubectl exec -it deploy/f5-tmm -c debug -n cnf-gateway – bash tmctl -d blade fw_rule_stat context_type context_name ------------ ------------------------------------------ virtual cnf-gateway-cnf-fw-policy-SecureContext_vs rule_name micro_rules counter last_hit_time action ------------------------------------ ----------- ------- ------------- ------ allow-10-20-http-firewallpolicyrule 1 2 1638572860 2 allow-10-30-ftp-firewallpolicyrule 1 5 1638573270 2 Conclusion To conclude our article, we showed how CNFs with consolidated data planes help with optimizing CNF deployments. In this article we went through the overview of BIG-IP Next Edge Firewall CNF implementation, sample configuration and monitoring capabilities. More use cases to cover different use cases to be following. Related content F5BigFwPolicy BIG-IP Next Cloud-Native Network Functions (CNFs) CNF Home135Views2likes2CommentsHow I did it.....again "High-Performance S3 Load Balancing with F5 BIG-IP"
Introduction Welcome back to the "How I did it" series! In the previous installment, we explored the high‑performance S3 load balancing of Dell ObjectScale with F5 BIG‑IP. This follow‑up builds on that foundation with BIG‑IP v21.x’s S3‑focused profiles and how to apply them in the wild. We’ll also put the external monitor to work, validating health with real PUT/GET/DELETE checks so your S3-compatible backends aren’t just “up,” they’re truly dependable. New S3 Profiles for the BIG-IP…..well kind of A big part of why F5 BIG-IP excels is because of its advanced traffic profiles, like TCP and SSL/TLS. These profiles let you fine-tune connection behavior—optimizing throughput, reducing latency, and managing congestion—while enforcing strong encryption and protocol settings for secure, efficient data flow. Available with version 21.x the BIG-IP now includes new S3-specific profiles, (s3-tcp and s3-default-clientssl). These profiles are based off existing default parent profiles, (tcp and clientssl respectively) that have been customized or “tuned” to optimize s3 traffic. Let’s take a closer look. Anatomy of a TCP Profile The BIG-IP includes a number of pre-defined TCP profiles that define how the system manages TCP traffic for virtual servers, controlling aspects like connection setup, data transfer, congestion control, and buffer tuning. These profiles allow administrators to optimize performance for different network conditions by adjusting parameters such as initial congestion window, retransmission timeout, and algorithms like Nagle’s or Delayed ACK. The s3-tcp, (see below) has been tweaked with respect to data transfer and congestion window sizes as well as memory management to optimize typical S3 traffic patterns (i.e. high-throughput data transfer, varying request sizes, large payloads, etc.). Tweaking the Client SSL Profile for S3 Client SSL profiles on BIG-IP define how the system terminates and manages SSL/TLS sessions from clients at the virtual server. They specify critical parameters such as certificates, private keys, cipher suites, and supported protocol versions, enabling secure decryption for advanced traffic handling like HTTP optimization, security policies, and iRules. The s3-default-clientssl has been modified, (see below) from the default client ssl profile to optimize SSL/TLS settings for high-throughput object storage traffic, ensuring better performance and compatibility with S3-specific requirements. Advanced S3-compatible health checking with EAV Has anyone ever told you how cool BIG-IP Extended Application Verification (EAV) aka external monitors are? Okay, I suppose “coolness” is subjective, but EAVs are objectively cool. Let me prove it to you. Health monitoring of backend S3-compatible servers typically involves making an HTTP GET request to either the exposed S3 ingest/egress API endpoint or a liveness probe. Get a 200 and all's good. Wouldn’t it be cool if you could verify a backend server's health by verifying it can actually perform the operations as intended? Fortunately, we can do just that using an EAV monitor. Therefore, based on the transitive property, EAVs are cool. —mic drop The bash script located at the bottom of the page performs health checks on S3-compatible storage by executing PUT, GET, and DELETE operations on a test object. The health check creates a temporary health check file with timestamp, retrieves the file to verify read access, and removes the test file to clean up. 
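As a side note, you can manually exercise the same three operations before wiring up the monitor. The sketch below relies on curl's built-in SigV4 signing (curl 7.75 or newer) from any admin host; the endpoint, bucket, region, and credentials are placeholders, and the BIG-IP's bundled curl may be too old for the --aws-sigv4 flag:

# Manual PUT/GET/DELETE sanity check against an S3-compatible backend.
# Placeholders: HOST, PORT, BUCKET, the region in --aws-sigv4, and the credentials.
HOST=192.0.2.10; PORT=9000; BUCKET=health-bucket
AUTH='--aws-sigv4 aws:amz:us-east-1:s3 --user ACCESS_KEY:SECRET_KEY'

echo "health check $(date)" > /tmp/healthcheck.txt
curl -s -o /dev/null -w "PUT %{http_code}\n"    $AUTH -X PUT --data-binary @/tmp/healthcheck.txt "http://$HOST:$PORT/$BUCKET/healthcheck.txt"
curl -s -o /dev/null -w "GET %{http_code}\n"    $AUTH "http://$HOST:$PORT/$BUCKET/healthcheck.txt"
curl -s -o /dev/null -w "DELETE %{http_code}\n" $AUTH -X DELETE "http://$HOST:$PORT/$BUCKET/healthcheck.txt"
# Expected codes, mirroring the monitor script: PUT 200, GET 200, DELETE 204.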
If all three operations return the expected HTTP status code, the node is marked up otherwise the node is marked down. Installing and using the EAV health check Import the monitor script Save the bash script, (.sh) extension, (located at the bottom of this page) locally and import the file onto the BIG-IP. Log in to the BIG-IP Configuration Utility and navigate to System > File Management > External Monitor Program File List > Import. Use the file selector to navigate to and select the newly created. bash file, provide a name for the file and select 'Import'. Create a new external monitor Navigate to Local Traffic > Monitors > Create Provide a name for the monitor. Select 'External' for the type, and select the previously uploaded file for the 'External Program'. The 'Interval' and 'Timeout' settings can be modified or left at the default as desired. In addition to the backend host and port, the monitor must pass three (3) additional variables to the backend: bucket - The name of an existing bucket where the monitor can place a small text file. During the health check, the monitor will create a file, request the file and delete the file. access_key - S3-compatible access key with permissions to perform the above operations on the specified bucket. secret_key - corresponding S3-compatible secret key. Select 'Finished' to create the monitor. Associate the monitor with the pool Navigate to Local Traffic > Pools > Pool List and select the relevant backend S3 pool. Under 'Health Monitors' select the newly created monitor and move from 'Available' to the 'Active'. Select 'Update' to save the configuration. Additional Links How I did it - "High-Performance S3 Load Balancing of Dell ObjectScale with F5 BIG-IP" F5 BIG-IP v21.0 brings enhanced AI data delivery and ingestion for S3 workflows Overview of BIG-IP EAV external monitors EAV Bash Script #!/bin/bash ################################################################################ # S3 Health Check Monitor for F5 BIG-IP (External Monitor - EAV) ################################################################################ # # Description: # This script performs health checks on S3-compatible storage by # executing PUT, GET, and DELETE operations on a test object. It uses AWS # Signature Version 4 for authentication and is designed to run as a BIG-IP # External Application Verification (EAV) monitor. # # Usage: # This script is intended to be configured as an external monitor in BIG-IP. # BIG-IP automatically provides the first two arguments: # $1 - Pool member IP address (may be IPv6-mapped format: ::ffff:x.x.x.x) # $2 - Pool member port number # # Additional arguments must be configured in the monitor's "Variables" field: # bucket - S3 bucket name # access_key - Access key for authentication # secret_key - Secret key for authentication # # BIG-IP Monitor Configuration: # Type: External # External Program: /path/to/this/script.sh # Variables: # bucket="your-bucket-name" # access_key="your-access-key" # secret_key="your-secret-key" # # Health Check Logic: # 1. PUT - Creates a temporary health check file with timestamp # 2. GET - Retrieves the file to verify read access # 3. 
DELETE - Removes the test file to clean up # Success: All three operations return expected HTTP status codes # Failure: Any operation fails or times out # # Exit Behavior: # - Prints "UP" to stdout if all checks pass (BIG-IP marks pool member up) # - Silent exit if any check fails (BIG-IP marks pool member down) # # Requirements: # - openssl (for SHA256 hashing and HMAC signing) # - curl (for HTTP requests) # - xxd (for hex encoding) # - Standard bash utilities (date, cut, sed, awk) # # Notes: # - Handles IPv6-mapped IPv4 addresses from BIG-IP (::ffff:x.x.x.x) # - Uses AWS Signature Version 4 authentication # - Logs activity to syslog (local0.notice) # - Creates temporary files that are automatically cleaned up # # Author: [Gregory Coward/F5] # Version: 1.0 # Last Modified: 12/2025 # ################################################################################ # ===== PARAMETER CONFIGURATION ===== # BIG-IP automatically provides these HOST="$1" # Pool member IP (may include ::ffff: prefix for IPv4) PORT="$2" # Pool member port BUCKET="${bucket}" # S3 bucket name ACCESS_KEY="${access_key}" # S3 access key SECRET_KEY="${secret_key}" # S3 secret key OBJECT="${6:-healthcheck.txt}" # Test object name (default: healthcheck.txt) # Strip IPv6-mapped IPv4 prefix if present (::ffff:10.1.1.1 -> 10.1.1.1) # BIG-IP may pass IPv4 addresses in IPv6-mapped format if [[ "$HOST" =~ ^::ffff: ]]; then HOST="${HOST#::ffff:}" fi # ===== S3/AWS CONFIGURATION ===== ENDPOINT="http://$HOST:$PORT" # S3 endpoint URL SERVICE="s3" # AWS service identifier for signature REGION="" # AWS region (leave empty for S3 compatible such as MinIO/Dell) # ===== TEMPORARY FILE SETUP ===== # Create temporary file for health check upload TMP_FILE=$(mktemp) printf "Health check at %s\n" "$(date)" > "$TMP_FILE" # Ensure temp file is deleted on script exit (success or failure) trap "rm -f $TMP_FILE" EXIT # ===== CRYPTOGRAPHIC HELPER FUNCTIONS ===== # Calculate SHA256 hash and return as hex string # Input: stdin # Output: hex-encoded SHA256 hash hex_of_sha256() { openssl dgst -sha256 -hex | sed 's/^.* //' } # Sign data using HMAC-SHA256 and return hex signature # Args: $1=hex-encoded key, $2=data to sign # Output: hex-encoded signature sign_hmac_sha256_hex() { local key_hex="$1" local data="$2" printf "%s" "$data" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:$key_hex" | awk '{print $2}' } # Sign data using HMAC-SHA256 and return binary as hex # Args: $1=hex-encoded key, $2=data to sign # Output: hex-encoded binary signature (for key derivation chain) sign_hmac_sha256_binary() { local key_hex="$1" local data="$2" printf "%s" "$data" | openssl dgst -sha256 -mac HMAC -macopt "hexkey:$key_hex" -binary | xxd -p -c 256 } # ===== AWS SIGNATURE VERSION 4 IMPLEMENTATION ===== # Generate AWS Signature Version 4 for S3 requests # Args: # $1 - HTTP method (PUT, GET, DELETE, etc.) 
# $2 - URI path (e.g., /bucket/object) # $3 - Payload hash (SHA256 of request body, or empty hash for GET/DELETE) # $4 - Content-Type header value (empty string if not applicable) # Output: pipe-delimited string "Authorization|Timestamp|Host" aws_sig_v4() { local method="$1" local uri="$2" local payload_hash="$3" local content_type="$4" # Generate timestamp in AWS format (YYYYMMDDTHHMMSSZ) local timestamp=$(date -u +"%Y%m%dT%H%M%SZ" 2>/dev/null || gdate -u +"%Y%m%dT%H%M%SZ") local datestamp=$(date -u +"%Y%m%d") # Build host header (include port if non-standard) local host_header="$HOST" if [ "$PORT" != "80" ] && [ "$PORT" != "443" ]; then host_header="$HOST:$PORT" fi # Build canonical headers and signed headers list local canonical_headers="" local signed_headers="" # Include Content-Type if provided (for PUT requests) if [ -n "$content_type" ]; then canonical_headers="content-type:${content_type}"$'\n' signed_headers="content-type;" fi # Add required headers (must be in alphabetical order) canonical_headers="${canonical_headers}host:${host_header}"$'\n' canonical_headers="${canonical_headers}x-amz-content-sha256:${payload_hash}"$'\n' canonical_headers="${canonical_headers}x-amz-date:${timestamp}" signed_headers="${signed_headers}host;x-amz-content-sha256;x-amz-date" # Build canonical request (AWS Signature V4 format) # Format: METHOD\nURI\nQUERY_STRING\nHEADERS\n\nSIGNED_HEADERS\nPAYLOAD_HASH local canonical_request="${method}"$'\n' canonical_request+="${uri}"$'\n\n' # Empty query string (double newline) canonical_request+="${canonical_headers}"$'\n\n' canonical_request+="${signed_headers}"$'\n' canonical_request+="${payload_hash}" # Hash the canonical request local canonical_hash canonical_hash=$(printf "%s" "$canonical_request" | hex_of_sha256) # Build string to sign local algorithm="AWS4-HMAC-SHA256" local credential_scope="$datestamp/$REGION/$SERVICE/aws4_request" local string_to_sign="${algorithm}"$'\n' string_to_sign+="${timestamp}"$'\n' string_to_sign+="${credential_scope}"$'\n' string_to_sign+="${canonical_hash}" # Derive signing key using HMAC-SHA256 key derivation chain # kSecret = HMAC("AWS4" + secret_key, datestamp) # kRegion = HMAC(kSecret, region) # kService = HMAC(kRegion, service) # kSigning = HMAC(kService, "aws4_request") local k_secret k_secret=$(printf "AWS4%s" "$SECRET_KEY" | xxd -p -c 256) local k_date k_date=$(sign_hmac_sha256_binary "$k_secret" "$datestamp") local k_region k_region=$(sign_hmac_sha256_binary "$k_date" "$REGION") local k_service k_service=$(sign_hmac_sha256_binary "$k_region" "$SERVICE") local k_signing k_signing=$(sign_hmac_sha256_binary "$k_service" "aws4_request") # Calculate final signature local signature signature=$(sign_hmac_sha256_hex "$k_signing" "$string_to_sign") # Return authorization header, timestamp, and host header (pipe-delimited) printf "%s|%s|%s" \ "${algorithm} Credential=${ACCESS_KEY}/${credential_scope}, SignedHeaders=${signed_headers}, Signature=${signature}" \ "$timestamp" \ "$host_header" } # ===== HTTP REQUEST FUNCTION ===== # Execute HTTP request using curl with AWS Signature V4 authentication # Args: # $1 - HTTP method (PUT, GET, DELETE) # $2 - Full URL # $3 - Authorization header value # $4 - Timestamp (x-amz-date header) # $5 - Host header value # $6 - Payload hash (x-amz-content-sha256 header) # $7 - Content-Type (optional, empty for GET/DELETE) # $8 - Data file path (optional, for PUT with body) # Output: HTTP status code (e.g., 200, 404, 500) do_request() { local method="$1" local url="$2" local auth="$3" local 
timestamp="$4" local host_header="$5" local payload_hash="$6" local content_type="$7" local data_file="$8" # Build curl command with required headers local cmd="curl -s -o /dev/null --connect-timeout 5 --write-out %{http_code} \"$url\"" cmd="$cmd -X $method" cmd="$cmd -H \"Host: $host_header\"" cmd="$cmd -H \"x-amz-date: $timestamp\"" cmd="$cmd -H \"x-amz-content-sha256: $payload_hash\"" # Add optional headers [ -n "$content_type" ] && cmd="$cmd -H \"Content-Type: $content_type\"" cmd="$cmd -H \"Authorization: $auth\"" [ -n "$data_file" ] && cmd="$cmd --data-binary @\"$data_file\"" # Execute request and return HTTP status code eval "$cmd" } # ===== MAIN HEALTH CHECK LOGIC ===== # ===== STEP 1: PUT (Upload Test Object) ===== # Calculate SHA256 hash of the temp file content UPLOAD_HASH=$(openssl dgst -sha256 -binary "$TMP_FILE" | xxd -p -c 256) CONTENT_TYPE="application/octet-stream" # Generate AWS Signature V4 for PUT request SIGN_OUTPUT=$(aws_sig_v4 "PUT" "/$BUCKET/$OBJECT" "$UPLOAD_HASH" "$CONTENT_TYPE") AUTH_PUT=$(cut -d'|' -f1 <<< "$SIGN_OUTPUT") DATE_PUT=$(cut -d'|' -f2 <<< "$SIGN_OUTPUT") HOST_PUT=$(cut -d'|' -f3 <<< "$SIGN_OUTPUT") # Execute PUT request (expect 200 OK) PUT_STATUS=$(do_request "PUT" "$ENDPOINT/$BUCKET/$OBJECT" "$AUTH_PUT" "$DATE_PUT" "$HOST_PUT" "$UPLOAD_HASH" "$CONTENT_TYPE" "$TMP_FILE") # ===== STEP 2: GET (Download Test Object) ===== # SHA256 hash of empty body (for GET requests with no payload) EMPTY_HASH="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # Generate AWS Signature V4 for GET request SIGN_OUTPUT=$(aws_sig_v4 "GET" "/$BUCKET/$OBJECT" "$EMPTY_HASH" "") AUTH_GET=$(cut -d'|' -f1 <<< "$SIGN_OUTPUT") DATE_GET=$(cut -d'|' -f2 <<< "$SIGN_OUTPUT") HOST_GET=$(cut -d'|' -f3 <<< "$SIGN_OUTPUT") # Execute GET request (expect 200 OK) GET_STATUS=$(do_request "GET" "$ENDPOINT/$BUCKET/$OBJECT" "$AUTH_GET" "$DATE_GET" "$HOST_GET" "$EMPTY_HASH" "" "") # ===== STEP 3: DELETE (Remove Test Object) ===== # Generate AWS Signature V4 for DELETE request SIGN_OUTPUT=$(aws_sig_v4 "DELETE" "/$BUCKET/$OBJECT" "$EMPTY_HASH" "") AUTH_DEL=$(cut -d'|' -f1 <<< "$SIGN_OUTPUT") DATE_DEL=$(cut -d'|' -f2 <<< "$SIGN_OUTPUT") HOST_DEL=$(cut -d'|' -f3 <<< "$SIGN_OUTPUT") # Execute DELETE request (expect 204 No Content) DEL_STATUS=$(do_request "DELETE" "$ENDPOINT/$BUCKET/$OBJECT" "$AUTH_DEL" "$DATE_DEL" "$HOST_DEL" "$EMPTY_HASH" "" "") # ===== LOG RESULTS ===== # Log all operation results for troubleshooting #logger -p local0.notice "S3 Monitor: PUT=$PUT_STATUS GET=$GET_STATUS DEL=$DEL_STATUS" # ===== EVALUATE HEALTH CHECK RESULT ===== # BIG-IP considers the pool member "UP" only if this script prints "UP" to stdout # Check if all operations returned expected status codes: # PUT: 200 (OK) # GET: 200 (OK) # DELETE: 204 (No Content) if [ "$PUT_STATUS" -eq 200 ] && [ "$GET_STATUS" -eq 200 ] && [ "$DEL_STATUS" -eq 204 ]; then #logger -p local0.notice "S3 Monitor: UP" echo "UP" fi # If any check fails, script exits silently (no "UP" output) # BIG-IP will mark the pool member as DOWN204Views3likes0CommentsBIG-IP VE: 40G Throughput from 4x10G physical NICs
Hello F5 Community,

I'm designing a BIG-IP VE deployment and need to achieve 40G throughput from 4x10G physical NICs. After extensive research (including reading K97995640), I've created this flowchart to summarize the options. Can you verify whether this understanding is correct?

**My Environment:**
- Physical server: 4x10G NICs
- ESXi 7.0
- BIG-IP VE (Performance LTM license)
- Goal: Maximize throughput for the data plane

**Research Findings:**
From F5 K97995640: "Trunking is supported on BIG-IP VE... intended to be used with SR-IOV interfaces but not with the default vmxnet3 driver."

                [Need 40G to F5 VE]
          ┌─────────────┴─────────────┐
          │                           │
    [F5 controls]              [ESXi controls]
   (F5 does LACP)             (ESXi does LACP)
          │                           │
     Only SR-IOV              Link Aggregation
          │                           │
    ┌─────┴─────┐               ┌─────┴─────┐
    │ 40G per   │               │ 40G agg,  │
    │ flow      │               │ 10G/flow  │
    └───────────┘               └───────────┘
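For the "F5 controls" branch, the BIG-IP side of the aggregation is a trunk over the SR-IOV interfaces. A rough sketch of what that looks like in tmsh, assuming the four virtual functions appear as interfaces 1.1 through 1.4 (the interface numbers, trunk name, VLAN name, and tag are placeholders, and the physical switch must be configured for LACP to match):

# Create an LACP-enabled trunk over the four SR-IOV interfaces, then attach it to a VLAN.
tmsh create net trunk data-trunk interfaces add { 1.1 1.2 1.3 1.4 } lacp enabled
tmsh create net vlan data-vlan interfaces add { data-trunk } tag 100
tmsh save sys config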