Event JSON
{
  "id": "7272e40727412a0cd7d8d6a42760430a53b36d000b78d689eb2ff54decddb562",
  "pubkey": "8b0be93ed69c30e9a68159fd384fd8308ce4bbf16c39e840e0803dcb6c08720e",
  "created_at": 1741572008,
  "kind": 1,
  "tags": [
    [
      "p",
      "7df906491edd0f44dff2a4f0832726cde02fa88974a24199c4dc0e7d3ca08fc6",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "81bda0accb54cf59d84c437f506909d42499461c59724d45678b8ad36ddb7482",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "6f02a9307775ce3dfb1336f48e3136787776d5671db2dba86dc98fbe834ad354",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://fedi.simonwillison.net/users/simon/statuses/114135663163025706",
      "activitypub"
    ]
  ],
  "content": "nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpq0husvjg7m585fhlj5ncgxfexehszl2yfwj3yrxwyms88609q3lrq0k05h8 sadly it just doesn't work: AI labs have been trying to train models to follow system prompts even when the user prompt contradicts them and none of them have come up with a 100% robust implementation",
  "sig": "bfccc65398f66b45ac6f1b16c09bc4a23cf5e5ea6b4ce4183e27580571014ea2c22d9eea68098fd709511b7bee7426defd87062e6d3f2e71a7a040e87026371b"
}