Event JSON
{
  "id": "7a120d37afc0d4e884dbd766de67cb7f9d2ed85e52d1371040e887e322b88fb4",
  "pubkey": "ece51acfc6c84a5f625a755bed921f6eac6a9daa6f90817047901dbe0231522f",
  "created_at": 1710627748,
  "kind": 1,
  "tags": [
    [
      "p",
      "f15fec4bda02de7a66c34ce8715aca0d5e6bb87545621568c893997f1398bee1",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "aadf9e00528eba62656fbffaeacd17040a604968e398e0ba31a62ddf09d4e932",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "88877db6c850632fcfe245088997f0bde59de5d78deb3115c10b9b615d5e3f1d",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/mistersql/statuses/112107700143187731",
      "activitypub"
    ]
  ],
  "content": "nostr:npub17907cj76qt085ekrfn58zkk2p40xhwr4g43p26xgjwvh7yuchmssdm49f9 Dual 30xx card are cheap and last time I did dual cards, finicky to keep working. 40xx very expensive, can run small models very fast, large models not at all. Mac studio with maximum memory or MacBook pro with maximum memory can run large models at \"medium\" perf. IMHO, for llm work quality matters more that just speed. Dumb 7b models have far more limited applications",
  "sig": "c172cce5441948ff2f0c27d47dcb930a8070e2995fa4bca357dd9504a504800fcd05080dce9cbfa5d03d45de02e5914e8a8eb80cf62d50fec5e029ede6fc3d1d"
}
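
For reference, the "id" field above is defined by NIP-01 as the SHA-256 hash of the canonical serialization [0, pubkey, created_at, kind, tags, content], encoded as compact JSON in UTF-8. Below is a minimal Python sketch of that check, assuming the event has already been parsed into a dict; verifying "sig" over this digest would additionally require a secp256k1 Schnorr library and is not shown.

import hashlib
import json

def compute_event_id(event):
    # NIP-01 canonical serialization: a JSON array in this exact field order,
    # with no extra whitespace and non-ASCII characters left unescaped.
    payload = [0, event["pubkey"], event["created_at"], event["kind"],
               event["tags"], event["content"]]
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    # The event id is the lowercase hex SHA-256 of that serialization.
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: compute_event_id(parsed_event) should equal the "id" field above.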