Event JSON
{
  "id": "c5a32cae734c906191cf79717f47ab339ea7c27449ebbfb685a97921b7c4171a",
  "pubkey": "d7e82c1e7929caf3d674c475f6cda6ecca20c1add0d4261cb55f960b0f8d7147",
  "created_at": 1709296748,
  "kind": 1,
  "tags": [
    ["p", "e2aff28ea7ced07c80ea9dce2775bf898658f3ccb5a3e07589c23d8e45bb18c4", "wss://relay.mostr.pub"],
    ["p", "ddc6c81c03da216550654f73121985d8f30636aac98903de01993746bab7bdb3", "wss://relay.mostr.pub"],
    ["e", "b8247c2d5cf611d6296aaf3d7d4ca2d6a0ae32fe5540be6867aaa4f7af790604", "wss://relay.mostr.pub", "reply"],
    ["proxy", "https://twit.social/users/MisuseCase/statuses/112020471701249133", "activitypub"]
  ],
  "content": "nostr:npub1u2hl9r48emg8eq82nh8zwadl3xr93u7vkk37qavfcg7cu3dmrrzqe4z4d7 Arguably LLMs don’t even “learn” language. They have kind of a probabilistic, entropy-reducing model of language and can arrange words that way. They don’t really “know” what the words “mean.”",
  "sig": "174e5be97567889b442c48d62d618a9fa33d62447fc2b7427657713c8bb7fd965c5005cdcba6cc0c99662a3555234e639302aaad478ede0506d8e81a4b0367ce"
}
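
For reference, the "p" tags mention two pubkeys, the "e" tag marked "reply" points at the event this note replies to, and the "proxy" tag (NIP-48) records that the note was bridged from the ActivityPub post at the given URL. The id field is not arbitrary: per NIP-01 it is the SHA-256 of a canonical JSON serialization of the other fields, and sig is a BIP-340 Schnorr signature over that id made with the key behind pubkey. Below is a minimal Python sketch of the id computation; the function name is mine, not part of any library.

```python
import hashlib
import json

def nostr_event_id(event: dict) -> str:
    """Recompute a Nostr event id per NIP-01.

    The id is the SHA-256 of the UTF-8 JSON serialization of
    [0, pubkey, created_at, kind, tags, content], with no extra
    whitespace and non-ASCII characters left unescaped.
    """
    serialized = json.dumps(
        [
            0,
            event["pubkey"],
            event["created_at"],
            event["kind"],
            event["tags"],
            event["content"],
        ],
        separators=(",", ":"),   # no spaces between tokens
        ensure_ascii=False,      # keep curly quotes etc. as raw UTF-8
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()
```

Run against the event above, this should reproduce its id, assuming the content string survived copy-paste byte for byte. Verifying sig additionally requires a BIP-340-capable secp256k1 library and is not shown here.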