Event JSON
{
  "id": "54acbfc57d71b39a9e46150183eaa004d4ba33c0d8c537a44655f9e513b66783",
  "pubkey": "b2ac36a4169aee20165e3d932c7da2fd68709497116d20470ccb5212b4842632",
  "created_at": 1731200742,
  "kind": 1,
  "tags": [
    [
      "p",
      "52c96f468ca2a5c7c540edf31b287453e5f8725dcef4619e6fc9b5cf64127f66",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "fce95231cd584e791f1f5d977ceac1ef6edb3d3a7a29ada5a657979836cbcb1f",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "3d7900773c04b46f90f70c7b418968cf4ac041687969ee6a0b17ddbed166101a",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/pexavc/statuses/113455971860745848",
      "activitypub"
    ]
  ],
  "content": "nostr:npub12tyk735v52ju032qahe3k2r520jlsujaem6xr8n0ex6u7eqj0anq59vnqc what are your thoughts on some on-device models that can help detect harmful content? Thinking of creating a Swift Package to handle a suite of these cases, if you'd like, for the iOS variant.",
  "sig": "7c36651eb5da951fd3a6854f00e5a6f6e1c98e3112d31beecf70f2f4bf15bfb3564cefd8cc78f69655075563a52380d9d45f664966e39d93a1a9b2e25741f99b"
}
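
The fields above follow Nostr's NIP-01 event format: "id" is the SHA-256 hex digest of a canonical serialization of the event, and "sig" is a BIP-340 Schnorr signature over that hash. The "proxy" tag marks this as a post bridged from ActivityPub (here via relay.mostr.pub). Below is a minimal sketch of recomputing the "id" to check an event's integrity, using only the Python standard library; the event_id helper name is mine, not part of any Nostr library.

import hashlib
import json

def event_id(event):
    # NIP-01: the id is the SHA-256 hex digest of the UTF-8 JSON
    # serialization of [0, pubkey, created_at, kind, tags, content],
    # with no extra whitespace and non-ASCII characters left unescaped.
    serialized = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(serialized.encode("utf-8")).hexdigest()

# Usage: parse the JSON above and compare; the result should equal
# the event's "id" field if the event has not been tampered with.
# event = json.loads(raw_json)
# assert event_id(event) == event["id"]

Verifying "sig" additionally requires a BIP-340 Schnorr verification of the id against "pubkey", which needs a secp256k1 library and is omitted here.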