Event JSON
{
"id": "c3d407772a23a74ecb7d7a2e5010aa8d2d94fee34fa93477dbd4bea9cfefba2f",
"pubkey": "bd385aa0b579765c6883c5b0eb17e8ae350c988c659510be1e8453557ee38784",
"created_at": 1741398134,
"kind": 1,
"tags": [
[
"p",
"86455a2a5bda6d4982350740698d834d06275a41cb5d177891396ffd01dbad36",
"wss://relay.mostr.pub"
],
[
"p",
"4ebb1885240ebc43fff7e4ff71a4f4a1b75f4e296809b61932f10de3e34c026b",
"wss://relay.mostr.pub"
],
[
"p",
"94ec43f593e3e0e04b79236f0f19966a29396124306da0621bc69338388d48c4",
"wss://relay.mostr.pub"
],
[
"p",
"8b0be93ed69c30e9a68159fd384fd8308ce4bbf16c39e840e0803dcb6c08720e",
"wss://relay.mostr.pub"
],
[
"e",
"31c6a42e185aa9ab12f1c33a9667bb356bdf80a7c31f3d74b2cbe0ec6e818183",
"wss://relay.mostr.pub",
"reply"
],
[
"proxy",
"https://mastodon.social/users/glyph/statuses/114124268162159377",
"activitypub"
]
],
"content": "nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpqsez452jmmfk5nq34qaqxnrvrf5rzwkjpedw3w7y389hl6qwm45mqtgr95k I have seen nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpqf6a33pfyp67y8llhunlhrf855xm47n3fdqymvxfj7yx78c6vqf4sy8ssyg make the claim that local / PC LLM use is actually *less* power (and thus carbon) efficient than datacenter usage, and I absolutely cannot square this with the moral panic pieces telling us that every word that ChatGPT produces ends up befouling ten liters of water or whatever. I don't know how this math works; are the journalists just totally wrong and misunderstanding the mechanics? Is simon mistaken? Is training getting amortized into generating somehow?",
"sig": "d691f65e00566c93e841009594ca4802ec2dfad5408c10e10d3688bcc33a090405b52cae0ce9a9cc0ee7937d4db5fad1480a2a9d6082d0f4a6e9002ec5de615f"
}
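
For anyone checking this event's integrity by hand, here is a minimal sketch (Python, standard library only) of how the "id" field is derived under NIP-01: the id is the SHA-256 digest of the compact JSON serialization [0, pubkey, created_at, kind, tags, content], and the "sig" field is a BIP-340 Schnorr signature by "pubkey" over that digest. The function name event_id below is illustrative, not part of any library.

import hashlib
import json

def event_id(event: dict) -> str:
    # NIP-01 canonical form: [0, pubkey, created_at, kind, tags, content],
    # serialized as compact JSON (no whitespace between tokens) with
    # non-ASCII characters emitted as raw UTF-8 rather than \u escapes.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Parsing the event JSON above into a dict and calling event_id(event)
# should reproduce its "id" field exactly; verifying "sig" additionally
# requires a BIP-340 Schnorr implementation, which is outside the stdlib.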