<?xml version="1.0" encoding="UTF-8"?>
<rss  xmlns:atom="http://www.w3.org/2005/Atom" 
      xmlns:media="http://search.yahoo.com/mrss/" 
      xmlns:content="http://purl.org/rss/1.0/modules/content/" 
      xmlns:dc="http://purl.org/dc/elements/1.1/" 
      version="2.0">
<channel>
<title>ToKnow.ai</title>
<link>https://toknow.ai/</link>
<atom:link href="https://toknow.ai/index.xml" rel="self" type="application/rss+xml"/>
<description>// Research, Numbers and Insightful Ideas //</description>
<image>
<url>https://toknow.ai/images/logo.png</url>
<title>ToKnow.ai</title>
<link>https://toknow.ai/</link>
<height>144</height>
<width>144</width>
</image>
<generator>quarto-1.6.42</generator>
<lastBuildDate>Fri, 06 Mar 2026 00:00:00 GMT</lastBuildDate>
<item>
  <title>Helios: A 14B Video Model That Runs at 19.5 FPS on a Single GPU</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/helios-bytedance-real-time-14b-video-generation/</link>
  <description><![CDATA[ ByteDance and PKU researchers built a 14-billion-parameter video generation model that hits real-time speeds on one H100 GPU, generating minute-long videos without the usual acceleration tricks. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/helios-bytedance-real-time-14b-video-generation/</guid>
  <pubDate>Fri, 06 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/helios-bytedance-real-time-14b-video-generation/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>Qwen3.5: One Model for Text, Images, Video, and Agent Tasks</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/qwen3-5-unified-native-multimodal-agent-model-alibaba/</link>
  <description><![CDATA[ Alibaba’s Qwen3.5 unifies language, vision, and agent capabilities into a single open-weights model. The flagship variant activates only 3B of its 35B parameters, yet matches models with 10x its active parameter count on reasoning and coding benchmarks. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/qwen3-5-unified-native-multimodal-agent-model-alibaba/</guid>
  <pubDate>Fri, 06 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/qwen3-5-unified-native-multimodal-agent-model-alibaba/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>Experiential Reinforcement Learning: Microsoft’s Reflection Loop Boosts RL Efficiency by 81%</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/experiential-reinforcement-learning-microsoft-reflection-loop/</link>
  <description><![CDATA[ Researchers at USC and Microsoft introduce Experiential Reinforcement Learning (ERL), a training method that embeds an explicit experience-reflection-consolidation loop into RL. It achieves gains of up to 81% in complex multi-step environments and 11% on tool-using reasoning tasks over standard RL baselines. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/experiential-reinforcement-learning-microsoft-reflection-loop/</guid>
  <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/experiential-reinforcement-learning-microsoft-reflection-loop/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>RynnBrain: One Open-Source Model for Robots That See, Reason, and Act</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/rynnbrain-open-embodied-foundation-model-spatiotemporal-intelligence/</link>
  <description><![CDATA[ Alibaba DAMO Academy releases RynnBrain, an open-source foundation model that combines perception, reasoning, and planning for robots. Trained on 20 million samples, it outperforms existing models across 20 embodied benchmarks spanning navigation, manipulation, and spatial reasoning. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/rynnbrain-open-embodied-foundation-model-spatiotemporal-intelligence/</guid>
  <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/rynnbrain-open-embodied-foundation-model-spatiotemporal-intelligence/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>SLA2: Sparse-Linear Attention Achieves 18.6x Speedup in Video Diffusion Models</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/sla2-sparse-linear-attention-18x-speedup-video-diffusion/</link>
  <description><![CDATA[ Researchers at Tsinghua and UC Berkeley propose SLA2, reaching 97% attention sparsity and an 18.6x speedup in video diffusion models with no quality loss. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/sla2-sparse-linear-attention-18x-speedup-video-diffusion/</guid>
  <pubDate>Wed, 04 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/sla2-sparse-linear-attention-18x-speedup-video-diffusion/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>Google DeepMind Lyria 3: Music Generation Model Outputs 48kHz Stereo with Real-Time Steering and SynthID Watermarking</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/google-deepmind-lyria-3-music-generation-synthid-realtime/</link>
  <description><![CDATA[ Google DeepMind releases Lyria 3, an advanced music generation model that creates full multi-instrumental tracks with vocals at 48kHz from text, image, or audio prompts, with a real-time API for live creative steering and SynthID watermarking for attribution. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/google-deepmind-lyria-3-music-generation-synthid-realtime/</guid>
  <pubDate>Tue, 03 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/google-deepmind-lyria-3-music-generation-synthid-realtime/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>Computer Science Is Still a Relevant Degree</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/computer-science-degree-still-relevant/</link>
  <description><![CDATA[ Research shows that AI coding assistants cut corners, fail on advanced problems, and produce insecure code. The knowledge a computer science education builds is exactly what LLMs cannot replace. ]]></description>
  <category>artificial-intelligence</category>
  <category>software-engineering</category>
  <category>education</category>
  <guid>https://toknow.ai/posts/computer-science-degree-still-relevant/</guid>
  <pubDate>Mon, 02 Mar 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/computer-science-degree-still-relevant/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>Cohere Tiny Aya: 3B-Parameter Multilingual Model Outperforms Larger Competitors in 46 of 61 Languages</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/cohere-tiny-aya-3b-multilingual-70-languages-on-device/</link>
  <description><![CDATA[ Cohere’s Tiny Aya is a 3.35B-parameter model family covering 70 languages. With 4-bit quantization it fits in 2.14 GB, runs at 10 tokens per second on an iPhone 13, and beats Gemma3-4B in translation quality for 46 of 61 languages on WMT24++. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/cohere-tiny-aya-3b-multilingual-70-languages-on-device/</guid>
  <pubDate>Sun, 22 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/cohere-tiny-aya-3b-multilingual-70-languages-on-device/image.png" medium="image" type="image/png" height="81" width="144"/>
</item>
<item>
  <title>World Labs: Spatial Intelligence AI Raises $1 Billion with Autodesk Partnership for 3D World Generation</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/world-labs-billion-dollar-spatial-intelligence-autodesk-3d-world-models/</link>
  <description><![CDATA[ World Labs raises $1 billion, including $200 million from Autodesk, to integrate AI world models that generate and reason about 3D environments into professional design workflows, pushing spatial intelligence as the next frontier beyond language models. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/world-labs-billion-dollar-spatial-intelligence-autodesk-3d-world-models/</guid>
  <pubDate>Sat, 21 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/world-labs-billion-dollar-spatial-intelligence-autodesk-3d-world-models/image.png" medium="image" type="image/png" height="81" width="144"/>
</item>
<item>
  <title>MiniMax-M2.5: Frontier-Class Agentic Model at One-Tenth the Cost</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/minimax-m25-frontier-model-cost-efficient-agentic-ai/</link>
  <description><![CDATA[ MiniMax-M2.5 is a 229B-parameter model that matches Claude Opus 4.6 and GPT-5 on coding and agentic benchmarks while costing $1 per hour of continuous operation. It scores 80.2% on SWE-bench Verified and completes tasks 37% faster than its predecessor. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/minimax-m25-frontier-model-cost-efficient-agentic-ai/</guid>
  <pubDate>Fri, 20 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/minimax-m25-frontier-model-cost-efficient-agentic-ai/image.png" medium="image" type="image/png" height="72" width="144"/>
</item>
<item>
  <title>Zyphra ZUNA: BCI Foundation Model Uses 4D Rotary Encoding to Generalize Across Any EEG System</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/zyphra-zuna-bci-foundation-model-eeg-4d-rope-diffusion/</link>
  <description><![CDATA[ Zyphra releases ZUNA, a 380M-parameter brain-computer interface foundation model trained on 2 million channel-hours across 208 datasets that uses 4D Rotary Positional Encoding and masked diffusion to reconstruct EEG signals from any electrode layout. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/zyphra-zuna-bci-foundation-model-eeg-4d-rope-diffusion/</guid>
  <pubDate>Fri, 20 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/zyphra-zuna-bci-foundation-model-eeg-4d-rope-diffusion/image.png" medium="image" type="image/png" height="104" width="144"/>
</item>
<item>
  <title>Anthropic Claude Sonnet 4.6: Million Token Context Window Meets Dynamic Filtering for Efficient Search</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/anthropic-claude-sonnet-46-million-token-context-dynamic-filtering/</link>
  <description><![CDATA[ Anthropic releases Claude Sonnet 4.6 with a 1-million-token context window and a Dynamic Filtering technique that cuts token usage by 24% while boosting search accuracy by 11%. Developers prefer it over the costlier Opus 4.5 59% of the time. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/anthropic-claude-sonnet-46-million-token-context-dynamic-filtering/</guid>
  <pubDate>Thu, 19 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/anthropic-claude-sonnet-46-million-token-context-dynamic-filtering/image.svg" medium="image" type="image/svg+xml"/>
</item>
<item>
  <title>NVIDIA PersonaPlex: One Speech Model That Listens, Talks, and Clones Any Voice</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/nvidia-personaplex-full-duplex-speech-voice-persona-control/</link>
  <description><![CDATA[ NVIDIA’s PersonaPlex is a 7B-parameter speech-to-speech model that listens and speaks simultaneously, supports real-time interruptions, and can adopt any voice identity from a short audio prompt. It outperforms both open-source and commercial systems on conversational dynamics benchmarks. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/nvidia-personaplex-full-duplex-speech-voice-persona-control/</guid>
  <pubDate>Thu, 19 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/nvidia-personaplex-full-duplex-speech-voice-persona-control/image.png" medium="image" type="image/png" height="76" width="144"/>
</item>
<item>
  <title>no-same-type-params: An ESLint Rule Against Silent Argument Swaps</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/no-same-type-params/</link>
  <description><![CDATA[ An ESLint rule that catches consecutive function parameters sharing the same type — the kind of bug TypeScript won’t warn you about. ]]></description>
  <category>software-engineering</category>
  <guid>https://toknow.ai/posts/no-same-type-params/</guid>
  <pubDate>Tue, 17 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/no-same-type-params/image.png" medium="image" type="image/png" height="79" width="144"/>
</item>
<item>
  <title>react-use-async: A React Hook for Async Operations That Doesn’t Overthink It</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/react-use-async/</link>
  <description><![CDATA[ A lightweight React hook for managing async operations with loading states, error handling, and data merging — in under 2KB. ]]></description>
  <category>software-engineering</category>
  <guid>https://toknow.ai/posts/react-use-async/</guid>
  <pubDate>Tue, 17 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/react-use-async/image.png" medium="image" type="image/png" height="79" width="144"/>
</item>
<item>
  <title>AI Self-Talk: Internal Speech Enhances Multitask Learning and Generalization</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/ai-inner-speech-self-talk-multitask-generalization/</link>
  <description><![CDATA[ Researchers at OIST found that training AI to generate self-directed inner speech alongside working memory lets it generalize across novel tasks using far less data, pointing to training procedures, not just architecture size, as a key driver of capability. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/ai-inner-speech-self-talk-multitask-generalization/</guid>
  <pubDate>Mon, 16 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/ai-inner-speech-self-talk-multitask-generalization/image.jpg" medium="image" type="image/jpeg"/>
</item>
<item>
  <title>ByteDance Seedance: Multimodal Video Generation Reaches a New Threshold</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/bytedance-seedance-multimodal-video-generation/</link>
  <description><![CDATA[ ByteDance’s Seedance model family, from 1.0 to 2.0, introduces a unified multimodal architecture for joint audio-video generation that accepts text, image, audio, and video inputs simultaneously, generating cinematic-quality clips with synchronized sound in seconds. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/bytedance-seedance-multimodal-video-generation/</guid>
  <pubDate>Mon, 16 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/bytedance-seedance-multimodal-video-generation/image.jpg" medium="image" type="image/jpeg"/>
</item>
<item>
  <title>GLM-5: Open-Source Mixture-of-Experts Closes the Gap with Frontier Models</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/glm5-open-source-mixture-of-experts-frontier-performance/</link>
  <description><![CDATA[ Z.ai’s GLM-5 is a 744B-parameter mixture-of-experts model with only 40B active parameters. Released under the MIT license, it achieves best-in-class open-source performance on reasoning, coding, and agentic benchmarks, rivaling Claude Opus 4.5 and Gemini 3 Pro. ]]></description>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/glm5-open-source-mixture-of-experts-frontier-performance/</guid>
  <pubDate>Mon, 16 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/glm5-open-source-mixture-of-experts-frontier-performance/image.svg" medium="image" type="image/svg+xml"/>
</item>
<item>
  <title>The Prompt-to-Code Paradox: Does Writing Human-Like Code Require a Prompt Longer Than the Code Itself?</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/prompt-length-vs-code-quality/</link>
  <description><![CDATA[ A research-backed technical analysis of why generating high-quality, production-grade code from LLMs demands detailed specifications—and whether the effort rivals writing the code yourself. ]]></description>
  <category>software-engineering</category>
  <category>artificial-intelligence</category>
  <guid>https://toknow.ai/posts/prompt-length-vs-code-quality/</guid>
  <pubDate>Fri, 13 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/prompt-length-vs-code-quality/image.png" medium="image" type="image/png" height="144" width="144"/>
</item>
<item>
  <title>Double Prompting: How Prompt Repetition Improves LLM Accuracy Without Extra Cost</title>
  <dc:creator>Kabui, Charles</dc:creator>
  <link>https://toknow.ai/posts/prompt-repetition-improves-llm-accuracy/</link>
  <description><![CDATA[ Research from Google shows that simply repeating your prompt fixes the ‘Lost in the Middle’ attention problem in LLMs, winning 47 out of 70 benchmark tests with zero losses, and boosting accuracy by up to 76 percentage points on targeted tasks. ]]></description>
  <category>artificial-intelligence</category>
  <category>software-engineering</category>
  <guid>https://toknow.ai/posts/prompt-repetition-improves-llm-accuracy/</guid>
  <pubDate>Fri, 13 Feb 2026 00:00:00 GMT</pubDate>
  <media:content url="https://toknow.ai/posts/prompt-repetition-improves-llm-accuracy/image.png" medium="image" type="image/png" height="144" width="144"/>
</item>
</channel>
</rss>
