Event JSON
{
  "id": "9f51f5f4ab871c5ac7576d91cd74b70ec5a143a2476b4e25a387aac30a75295a",
  "pubkey": "ad159d25c6d90f397ab2c21dca6492cb42079f31b8d80c9970d17c80802bd8a3",
  "created_at": 1708020007,
  "kind": 1,
  "tags": [
    [
      "p",
      "e2aff28ea7ced07c80ea9dce2775bf898658f3ccb5a3e07589c23d8e45bb18c4",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "ddc6c81c03da216550654f73121985d8f30636aac98903de01993746bab7bdb3",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "a335fdd29d88b814c70eda033ff309ec5a37504815f4b61b19351ae6ecbbb1b3",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://hachyderm.io/users/kellogh/statuses/111936799222931863",
      "activitypub"
    ]
  ],
  "content": "nostr:npub1u2hl9r48emg8eq82nh8zwadl3xr93u7vkk37qavfcg7cu3dmrrzqe4z4d7 i wish these models were a bit more up-front on the sort of accuracy loss incurred when using longer context lengths. they talk about “successfully” using it with 10M tokens, but are vague on what success means",
  "sig": "c25c70ce5a323d983b8d3eb324fc2f65c9ba24aa68863310333d86438b7ee9ae7e92c5da02a45f9c58c12dd9256bd456d9454492f4ebbc51e7e4f48af1275963"
}
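
For reference, the "id" field above is not arbitrary: per NIP-01 it is the SHA-256 hash of the compact JSON serialization of [0, pubkey, created_at, kind, tags, content]. Below is a minimal Python sketch of that derivation; the event_id helper name is illustrative, not part of any library.

    import hashlib
    import json

    def event_id(event: dict) -> str:
        """Recompute a Nostr event id per NIP-01 (illustrative helper)."""
        # NIP-01 serializes [0, pubkey, created_at, kind, tags, content]
        # as compact JSON: no extra whitespace, non-ASCII kept as UTF-8.
        payload = [
            0,
            event["pubkey"],
            event["created_at"],
            event["kind"],
            event["tags"],
            event["content"],
        ]
        serialized = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
        return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

Calling event_id on the event above should reproduce its "id" value; the "sig" is then a Schnorr signature over that hash, which is why the content string must stay byte-for-byte unchanged.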