Event JSON
{
  "id": "af2368e1717e2447fc5fced79024c52c361289cf32299741ed8f152da2c4df6d",
  "pubkey": "65fdfc6e50377c80c53d20bddafe8c75197fa116621e619be537db6c7463c454",
  "created_at": 1738570578,
  "kind": 1,
  "tags": [
    [
      "p",
      "86258564f12993bdc24900cfebf19c280d99857beed23c4b5029d0cd2460a390",
      "wss://relay.mostr.pub"
    ],
    [
      "p",
      "80ee8adeda8e5da3db21e3c53e89092adbf79cadb7074e8d61ad5f759803c1c5",
      "wss://relay.mostr.pub"
    ],
    [
      "zap",
      "65fdfc6e50377c80c53d20bddafe8c75197fa116621e619be537db6c7463c454",
      "wss://relay.mostr.pub",
      "0.915"
    ],
    [
      "zap",
      "6be38f8c63df7dbf84db7ec4a6e6fbbd8d19dca3b980efad18585c46f04b26f9",
      "wss://relay.mostr.pub",
      "0.085"
    ],
    [
      "e",
      "52607bf562f082397c4958d5751fbd00dbff19aecfb233180a3ff840f6ec4b44",
      "wss://relay.mostr.pub",
      "reply"
    ],
    [
      "proxy",
      "https://rayci.st/objects/6590b7d8-71a9-4ae8-ab3e-c84362b4e86c",
      "activitypub"
    ]
  ],
  "content": "nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpqscjc2e839xfmmsjfqr87huvu9qxenptmamfrcj6s98gv6frq5wgqtwgu4k nostr:nprofile1qy2hwumn8ghj7un9d3shjtnddaehgu3wwp6kyqpqsrhg4hk63ew68kepu0znazgf9tdl089dkur5artp440htxqrc8zsurktpc They already have a term for that. It's called \"Synthetic Data\". The issue is that most people training LLMs actually find synthetic data beneficial and just train their AIs on it anyway. Their solution for filtering slop is using \"Reinforcement learning\" where they reward the model for functional code and punish it for broken code. This is how they made DeepSeek R1.",
  "sig": "f16fc087b31cae875d75befe4dc8b8e8762d95dacf69c8a03dd1d49ae81447a315d8095b7136f5ec3e9b9d5760ebd517b5bd7c6605c25012116887229ee1d851"
}