Event JSON
{
  "id": "66ab24f9a7afd08652cd6bb6c09faa763d87740181a9d5a5bc5dcf173b95ff9f",
  "pubkey": "b2ac36a4169aee20165e3d932c7da2fd68709497116d20470ccb5212b4842632",
  "created_at": 1731201221,
  "kind": 1,
  "tags": [
    [
      "p",
      "52c96f468ca2a5c7c540edf31b287453e5f8725dcef4619e6fc9b5cf64127f66",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "fce95231cd584e791f1f5d977ceac1ef6edb3d3a7a29ada5a657979836cbcb1f",
      "wss://relay.mostr.pub"
    ],
    [
      "e",
      "fb8416952893d2cb15ed0187f41a5debbf04511ccd9e5a9bd5862bcda79e7349",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://mastodon.social/users/pexavc/statuses/113456003241970413",
      "activitypub"
    ]
  ],
  "content": "nostr:npub12tyk735v52ju032qahe3k2r520jlsujaem6xr8n0ex6u7eqj0anq59vnqc Nice, I'll take a look at that too. I have been learning MLX, another Apple ML framework, and it works quite well for LLMs. Will see how that can be used with abusive language detection or fact-checking. Vision wise, I'll try to find properly licensed CoreML models to bundle with it all.",
  "sig": "f45282f39b06b94e6c2aa0625f3404255cee03573255ede682924a507d09e42b31834595ef53e826c9af11479f4dc59dc5020a9a2f037e150b33974a1374d134"
}