#!/usr/bin/env python3
# aditya.py — instantiate me
class AdityaKumarAnupam:
    """
    ML Systems Architect who lives in the gap
    between research and production.
    """

    # Profile constants — class-level on purpose: one shared identity.
    role = "ML Systems Architect"
    location = "India 🇮🇳"
    available = True  # open to opportunities
    focus = [
        "Autonomous AI Agents",
        "LLM Applications & RAG",
        "Computer Vision Pipelines",
        "Full-Stack AI Products",
        "Production Inference Systems",
    ]
    contact = {
        "email": "aditanupam@gmail.com",
        "linkedin": "linkedin.com/in/aditanupam",
        "github": "github.com/akanupam",
        "phone": "+91-8002809961",
    }

    def philosophy(self) -> str:
        """Return the one-line engineering credo."""
        return "If it doesn't ship, it doesn't count."
| Status | Domain | What's Happening |
|---|---|---|
| 🟢 | Autonomous AI Agents | Multi-agent systems that reason, plan, and take actions autonomously |
| 🟢 | LLM Applications | Production-grade RAG pipelines, fine-tuned models, memory systems |
| 🟢 | Computer Vision | Real-time inference pipelines, edge deployment, object detection |
| 🟢 | Full-Stack AI Products | Next.js frontends + FastAPI backends + LLM integrations |
| 🟡 | Inference Optimization | Quantization, distillation, latency reduction at scale |
| 🟡 | Multimodal ML | Vision + language integrated architectures |
┌──────────────────────────────────────────────────────────────────┐
│ │
│ 01 · A model that can't be deployed is a science project. │
│ │
│ 02 · Reproducibility is not optional. It's engineering. │
│ │
│ 03 · The pipeline is the product — not the model inside it. │
│ │
│ 04 · Latency is a UX problem. Treat it like one. │
│ │
│ 05 · Real intelligence survives contact with real data. │
│ │
│ 06 · Own the system end-to-end, or own none of it. │
│ │
└──────────────────────────────────────────────────────────────────┘
// engineered with intent · zero boilerplate · always shipping


