Markus Eisele on Nostr: Best Practices for Deploying LLM Inference, RAG and Fine Tuning Pipelines on K8s ...
Published at
2024-11-19 09:39:00
Event JSON
{
"id": "cee50ee23fa2b109afb5fe3dbb7b43917b6260f23689c14a9d49fb4c6034fc0e",
"pubkey": "f968da2261d45a4a2ebfac6e7f844bdfc6d8e65ef2c3384a1255f7639f24a256",
"created_at": 1732009140,
"kind": 1,
"tags": [
[
"t",
"AIML"
],
[
"t",
"genai"
],
[
"t",
"llm"
],
[
"t",
"kubernetes"
],
[
"proxy",
"https://mastodon.online/users/myfear/statuses/113508951051355741",
"activitypub"
]
],
"content": "Best Practices for Deploying LLM Inference, RAG and Fine Tuning Pipelines on K8s\n https://m.youtube.com/watch?v=EmGe_58524g\n#aiml #genai #llm #Kubernetes",
"sig": "b223f1202f30cdeb84674cfc4be29e29b90b0d2d77ec61117b893bf481e3464fd718ae971204f4f7ddbf8a10d52fded14291096aa3972acaafb1750cff569e9d"
}