Understanding Kubiya workflows and their components
from kubiya import workflow, step


@workflow
def my_data_pipeline():
    """Minimal workflow skeleton: the decorated function body defines the
    workflow's step logic (empty here as a starting template)."""
    # Your workflow logic here
    pass
# Extraction step: runs extract.py in a Python 3.11 container, pulling
# from a database source. The step's result is bound to `data` for use
# by later steps.
data = step.extract(
    name="extract-data",
    image="python:3.11",
    script="extract.py",
    inputs={"source": "database"},
)
# Quality gate: promote to production only when the extracted data's
# quality score clears the 0.8 threshold; otherwise alert the data team.
if data.quality_score > 0.8:
    step.proceed_to_production()
else:
    step.alert_data_team()
# Critical step with resilience settings: up to 3 retries with exponential
# backoff between attempts, and an alert raised if all attempts fail.
step.critical_operation(
    retry=3,
    backoff="exponential",
    on_failure="alert",
)
# Inline AI agent step: asks the agent (executing on the hosted runner)
# to analyze logs for anomalies, giving it the log-parser tool.
# NOTE(review): `log_parser_tool` must be defined earlier in the page's
# surrounding example — confirm against the full document.
analysis = step.inline_agent(
    message="Analyze these logs for anomalies",
    runners=["kubiya-hosted"],
    tools=[log_parser_tool],
)
from kubiya import workflow, step


@workflow(
    name="intelligent-deployment",
    description="AI-assisted deployment with safety checks",
)
def deploy_with_intelligence():
    """AI-assisted deployment workflow.

    Runs the test suite, has an inline agent analyze the results, and
    either builds + deploys to Kubernetes (with a team notification) or
    opens a high-severity incident with the agent's concerns.
    """
    # Run tests in Node.js container
    test_results = step.run_tests(
        image="node:18",
        script="npm test",
        timeout="5m",
    )

    # Analyze results with AI
    analysis = step.inline_agent(
        message=f"Analyze these test results: {test_results}",
        runners=["kubiya-hosted"],
        tools=[
            {
                "name": "decide-deployment",
                "type": "function",
                "description": "Decide if safe to deploy",
            }
        ],
    )

    # Conditional deployment
    if analysis.should_deploy:
        # Build in parallel
        image = step.build_docker_image(
            dockerfile="./Dockerfile",
            tag="myapp:latest",
        )

        # Deploy to Kubernetes
        step.deploy_to_k8s(
            manifest="k8s/deployment.yaml",
            image=image.tag,
            namespace="production",
        )

        # Notify team
        step.send_notification(
            channel="deployments",
            message="🚀 Deployed successfully!",
        )
    else:
        # Alert on issues
        step.create_incident(
            severity="high",
            details=analysis.concerns,
        )
Was this page helpful?