Event JSON
{
  "id": "55ddc2278356686a60b6988db21a279f5abdb4f6c4c66b09f0866ecc21927142",
  "pubkey": "97864a80165b391b6c0cdb66aa9b8fa7b3c77d86737d6c9ad3205506fefe0fc8",
  "created_at": 1721492739,
  "kind": 1,
  "tags": [
    ["p", "bb0526093bc7f94fd056d51262669842fd978190d0f13d3c5e530b7278bbd844", "wss://relay.mostr.pub"],
    ["p", "ae947543152d3049444c930c2d35a0edc7d19a4e517cee01fd1ba03874437724", "wss://relay.mostr.pub"],
    ["e", "a9ae4097a9973b08d89cb0bcac7fe628b6717e9b12c8a0f748db526001623014", "wss://relay.mostr.pub", "reply"],
    ["proxy", "https://mstdn.ca/users/michaelgemar/statuses/112819748205273958", "activitypub"]
  ],
  "content": "nostr:npub1hvzjvzfmclu5l5zk65fxye5cgt7e0qvs6rcn60z72v9hy79mmpzq4qmle9 I’m really surprised at all the examples of failures based on single instances in the training material, given the huge corpus of text a model trains on, and the tendency to “average out” the text.",
  "sig": "ade11facea0ab99279031d67fb6554b003a7fac8b88587b76a2286618097c60cb404a3f52d1b8a26308bdefdc9fedfea2a68f37270c8591b4fb74361bc9c9961"
}
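
For reference, the "id" field above is defined by NIP-01 as the SHA-256 hash of the event's canonical serialization: the JSON array [0, pubkey, created_at, kind, tags, content] encoded with no whitespace and raw UTF-8. Below is a minimal verification sketch using only the Python standard library; note that NIP-01 mandates a specific escape set for string fields, which json.dumps with ensure_ascii=False matches for typical content like this note, but edge cases with unusual control characters would need custom escaping.

import hashlib
import json

def nip01_event_id(event: dict) -> str:
    # Canonical serialization per NIP-01:
    # [0, pubkey, created_at, kind, tags, content]
    payload = [
        0,
        event["pubkey"],
        event["created_at"],
        event["kind"],
        event["tags"],
        event["content"],
    ]
    # No whitespace between tokens; keep non-ASCII characters raw.
    serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: with the JSON above loaded into `event` (e.g. via json.load),
# nip01_event_id(event) should equal event["id"].

The "sig" field is a BIP-340 Schnorr signature over that same id, made by the key in "pubkey"; checking it requires a secp256k1 library and is omitted from this sketch.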