update(k8s): add storage and instance type
This commit is contained in:
@@ -87,19 +87,27 @@ def display_kubernetes_nodes():
|
||||
table.add_column("Node Name", style="white")
|
||||
table.add_column("CPU", justify="right", style="yellow")
|
||||
table.add_column("Memory (MiB)", justify="right", style="cyan")
|
||||
table.add_column("Pods Allocatable", justify="right", style="green")
|
||||
table.add_column("Storage", style="green")
|
||||
table.add_column("Instance Type", style="blue")
|
||||
table.add_column("Pods Allocatable", justify="right", style="magenta")
|
||||
|
||||
nodes = v1.list_node()
|
||||
for node in nodes.items:
|
||||
storage = node.metadata.annotations.get("node.kubernetes.io/storage", "N/A")
|
||||
instance_type = node.metadata.labels.get("beta.kubernetes.io/instance-type", "N/A")
|
||||
|
||||
table.add_row(
|
||||
node.metadata.name,
|
||||
node.status.capacity.get("cpu"),
|
||||
f"{round(convert_memory_to_mib(node.status.capacity.get('memory')), 2)}",
|
||||
storage,
|
||||
instance_type,
|
||||
node.status.allocatable.get("pods")
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
|
||||
def display_namespace_usage():
|
||||
console = Console()
|
||||
config.load_incluster_config()
|
||||
@@ -143,5 +151,3 @@ if __name__ == "__main__":
|
||||
display_virtual_machines()
|
||||
display_kubernetes_nodes()
|
||||
display_namespace_usage()
|
||||
console.print("\n🚀 [bold cyan]Dashboard rendering complete![/bold cyan] 🚀")
|
||||
|
||||
|
||||
@@ -85,17 +85,12 @@ def fetch_k8s_data_with_usage():
|
||||
# Export endpoint
|
||||
@router.get("/export")
|
||||
def export_data(format: str = "yaml"):
|
||||
"""
|
||||
API endpoint to export data in YAML or JSON format.
|
||||
"""
|
||||
data = {
|
||||
"metal_nodes": fetch_all("metal_nodes"),
|
||||
"virtual_machines": fetch_all("virtual_machines"),
|
||||
"kubernetes": fetch_k8s_data_with_usage(),
|
||||
}
|
||||
|
||||
logging.info(f"Exporting data: {data}")
|
||||
|
||||
if format.lower() == "yaml":
|
||||
yaml_data = yaml.safe_dump(data, sort_keys=False)
|
||||
return Response(content=yaml_data, media_type="text/yaml")
|
||||
|
||||
@@ -9,12 +9,20 @@ def fetch_k8s_data_with_usage():
|
||||
metrics_client = client.CustomObjectsApi()
|
||||
|
||||
# Fetch nodes
|
||||
nodes = [{
|
||||
nodes = []
|
||||
for node in v1.list_node().items:
|
||||
# Extract storage and instance type from labels or annotations
|
||||
storage = node.metadata.annotations.get("node.kubernetes.io/storage", "N/A")
|
||||
instance_type = node.metadata.labels.get("beta.kubernetes.io/instance-type", "N/A")
|
||||
|
||||
nodes.append({
|
||||
"node_name": node.metadata.name,
|
||||
"cpu": node.status.capacity.get("cpu"),
|
||||
"memory": round(convert_memory_to_mib(node.status.capacity.get("memory")), 2), # Convert to MiB
|
||||
"storage": storage,
|
||||
"instance_type": instance_type,
|
||||
"pods_allocatable": node.status.allocatable.get("pods"),
|
||||
} for node in v1.list_node().items]
|
||||
})
|
||||
|
||||
# Fetch namespaces
|
||||
namespaces = [ns.metadata.name for ns in v1.list_namespace().items]
|
||||
@@ -50,24 +58,15 @@ def fetch_k8s_data_with_usage():
|
||||
|
||||
|
||||
def convert_cpu_to_cores(cpu):
    """
    Convert a Kubernetes CPU quantity string to cores.

    Handles units: n (nano), u (micro), m (milli), or none (plain cores).

    Args:
        cpu: CPU quantity string as reported by the Kubernetes API,
            e.g. ``"1500000n"``, ``"250m"``, ``"2"``.

    Returns:
        float: number of cores; sub-core unit conversions are rounded
        to 4 decimal places.
    """
    if "n" in cpu:  # nanocores -> cores
        return round(int(cpu.replace("n", "")) / 1e9, 4)
    elif "u" in cpu:  # microcores -> cores
        return round(int(cpu.replace("u", "")) / 1e6, 4)
    elif "m" in cpu:  # millicores -> cores
        return round(int(cpu.replace("m", "")) / 1000, 4)
    return float(cpu)  # already expressed in cores
||||
|
||||
def convert_memory_to_mib(memory):
|
||||
"""
|
||||
Convert memory to MiB (mebibytes).
|
||||
Handles units: Ki (kibibytes), Mi (mebibytes), Gi (gibibytes).
|
||||
"""
|
||||
if "Ki" in memory:
|
||||
return int(memory.replace("Ki", "")) / 1024
|
||||
elif "Mi" in memory:
|
||||
@@ -78,8 +77,4 @@ def convert_memory_to_mib(memory):
|
||||
|
||||
@router.get("/k8s/data")
def get_k8s_data():
    """
    API endpoint returning the full Kubernetes snapshot: nodes,
    namespaces, and per-namespace resource usage.

    Returns:
        The payload assembled by ``fetch_k8s_data_with_usage``.
    """
    payload = fetch_k8s_data_with_usage()
    return payload
|
||||
|
||||
71
app/routes/think.py
Normal file
@@ -0,0 +1,71 @@
|
||||
import os
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from kubernetes import client, config
|
||||
from typing import List, Dict
|
||||
import requests
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
def fetch_ai_summary(cluster_name: str, nodes: List[Dict], api_url: str, auth_token: str, *, timeout: float = 30.0) -> str:
    """
    Send node data to an external AI API endpoint and retrieve a summary.

    Args:
        cluster_name: Human-readable name of the cluster being summarized.
        nodes: Per-node dictionaries to embed in the request payload.
        api_url: Full URL of the AI API endpoint.
        auth_token: Bearer token placed in the Authorization header.
        timeout: Seconds to wait for the HTTP request before aborting.
            Keyword-only with a default, so existing callers are unaffected.

    Returns:
        The AI-generated summary on success; otherwise a human-readable
        error description. This function never raises for API failures —
        every failure mode is folded into the returned string.
    """
    payload = {
        "data": {
            "cluster_name": cluster_name,
            "nodes": nodes,
        }
    }

    headers = {
        "Authorization": f"Bearer {auth_token}",
        "Content-Type": "application/json",
    }

    try:
        # Without an explicit timeout, requests.post can block forever on a
        # stalled server, hanging the calling endpoint; bound the wait.
        response = requests.post(api_url, json=payload, headers=headers, timeout=timeout)
        if response.status_code == 200:
            result = response.json()
            if result.get("success"):
                return result.get("result", "No result provided by the API.")
            else:
                return "API responded with success=false."
        else:
            return f"API request failed with status code {response.status_code}: {response.text}"
    except requests.RequestException as e:
        return f"An error occurred while contacting the API: {str(e)}"
|
||||
|
||||
@router.get("/think/k8s")
def think_k8s():
    """
    Fetch Kubernetes node data, send it to the AI API, and return the
    AI-generated summary.

    Raises:
        HTTPException: 500 when the AI_API_TOKEN environment variable
            is not set.
    """
    config.load_incluster_config()
    v1 = client.CoreV1Api()

    cluster_name = "k8s-cluster"

    # BUG FIX: ``items`` on the list_namespace() response is a list
    # attribute, not a method — calling it (``.items()``) raised
    # TypeError. Also hoisted out of the node loop so the API server is
    # queried once instead of once per node.
    namespace_names = [ns.metadata.name for ns in v1.list_namespace().items]

    # Format node data for the AI payload.
    nodes = []
    for node in v1.list_node().items:
        node_data = {
            "name": node.metadata.name,
            "cpu": f"{node.status.capacity.get('cpu')} cores",
            # Assumes the API reports memory in Ki — TODO confirm other
            # unit suffixes never appear on these nodes.
            "memory": f"{round(int(node.status.capacity.get('memory').replace('Ki', '')) / 1024 / 1024, 2)} GB",
            # Read storage/instance type the same way the rest of this
            # commit does, instead of hard-coding "N/A".
            "storage": node.metadata.annotations.get("node.kubernetes.io/storage", "N/A"),
            "type": node.metadata.labels.get("beta.kubernetes.io/instance-type", "N/A"),
            "namespaces": namespace_names,
        }
        nodes.append(node_data)

    # Fetch endpoint and token from the environment. NOTE(review):
    # api_url may be None when AI_API_URL is unset; the downstream
    # request then fails and is reported in the returned summary string.
    api_url = os.getenv("AI_API_URL")
    auth_token = os.getenv("AI_API_TOKEN")

    if not auth_token:
        raise HTTPException(status_code=500, detail="AI API token is not set. Please set the AI_API_TOKEN environment variable.")

    # Call AI API
    summary = fetch_ai_summary(cluster_name, nodes, api_url, auth_token)

    return {"summary": summary}
|
||||
Reference in New Issue
Block a user