{"payload":{"pageCount":4,"repositories":[{"type":"Public","name":"NeMo","owner":"NVIDIA","isFork":false,"description":"A scalable generative AI framework built for researchers and developers working on Large Language Models, Multimodal, and Speech AI (Automatic Speech Recognition and Text-to-Speech)","allTopics":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models","speaker-diariazation","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":88,"issueCount":53,"starsCount":10860,"forksCount":2264,"license":"Apache License 2.0","participation":[21,16,12,14,21,23,17,20,20,18,17,29,29,23,15,19,8,7,11,6,6,11,15,13,5,11,17,21,22,9,17,31,18,40,19,27,39,8,24,34,27,23,38,33,34,21,20,37,36,13,36,36],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-06T01:30:05.127Z"}},{"type":"Public","name":"NeMo-Curator","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for data curation","allTopics":["data-curation","llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":33,"starsCount":332,"forksCount":33,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,2,4,1,1,5,4,4,2,6,3,3,3,2,5,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-06T00:00:14.981Z"}},{"type":"Public","name":"numba-cuda","owner":"NVIDIA","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":4,"starsCount":10,"forksCount":6,"license":"BSD 2-Clause \"Simplified\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,1,11,9,0,3,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T23:26:04.559Z"}},{"type":"Public","name":"NeMo-Framework-Launcher","owner":"NVIDIA","isFork":false,"description":"Provides end-to-end model development pipelines for LLMs and Multimodal models that can be launched on-prem or cloud-native.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":23,"starsCount":420,"forksCount":124,"license":"Apache License 2.0","participation":[6,15,11,14,11,10,11,6,15,20,9,0,9,35,7,7,11,7,18,12,21,30,34,25,4,19,25,29,31,15,57,27,31,27,28,42,5,5,14,18,19,24,19,30,54,19,1,28,21,9,7,50],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T22:57:35.804Z"}},{"type":"Public","name":"earth2studio","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for exploring, building and deploying AI weather/climate workflows.","allTopics":["weather","ai","deep-learning","climate-science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":0,"starsCount":48,"forksCount":11,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,17,12,5,3,9,1,4,6,2,3,4,4,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T23:01:03.708Z"}},{"type":"Public","name":"TransformerEngine","owner":"NVIDIA","isFork":false,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and 
inference.","allTopics":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":31,"issueCount":112,"starsCount":1605,"forksCount":256,"license":"Apache License 2.0","participation":[8,13,3,6,11,6,4,4,3,1,6,8,9,12,7,6,3,6,9,3,9,8,8,5,0,6,10,9,12,10,9,4,7,7,8,5,7,2,11,7,14,6,13,3,10,7,10,9,25,9,6,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T23:12:53.474Z"}},{"type":"Public","name":"warp","owner":"NVIDIA","isFork":false,"description":"A Python framework for high performance GPU simulation and graphics","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":45,"starsCount":3625,"forksCount":203,"license":"Other","participation":[7,44,13,11,6,30,24,39,24,4,11,29,14,15,38,45,44,34,21,19,3,29,20,22,0,8,25,31,30,11,6,14,32,49,53,42,29,6,16,6,40,33,42,25,31,19,44,41,20,5,9,58],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T19:16:23.724Z"}},{"type":"Public","name":"cloudai","owner":"NVIDIA","isFork":false,"description":"CloudAI Benchmark Framework","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":0,"starsCount":20,"forksCount":10,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T19:38:46.403Z"}},{"type":"Public","name":"NeMo-Aligner","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for efficient model alignment","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":25,"issueCount":44,"starsCount":424,"forksCount":46,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,8,4,12,1,2,2,0,3,0,5,2,2,1,0,4,3,3,2,2,3,3,4,2,5,0,0,2,0,3,4,4,3,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T19:18:54.193Z"}},{"type":"Public","name":"NeMo-Guardrails","owner":"NVIDIA","isFork":false,"description":"NeMo Guardrails is an open-source toolkit for easily adding programmable guardrails to LLM-based conversational systems.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":27,"issueCount":173,"starsCount":3730,"forksCount":331,"license":"Other","participation":[28,22,32,18,13,17,17,35,74,25,48,37,58,60,57,70,39,45,79,56,25,49,38,26,3,18,49,21,31,48,53,94,51,105,94,70,35,100,10,0,36,13,6,15,3,12,14,60,25,11,17,42],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T15:12:00.282Z"}},{"type":"Public","name":"modulus","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for building, training, and fine-tuning deep learning models using state-of-the-art Physics-ML methods","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":99,"starsCount":794,"forksCount":170,"license":"Apache License 2.0","participation":[2,6,7,8,12,2,3,1,3,0,9,9,5,4,2,4,4,8,9,4,9,8,5,2,0,6,3,7,6,4,7,3,7,10,4,2,4,0,5,15,4,3,11,3,1,4,5,1,3,0,10,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T13:09:15.706Z"}},{"type":"Public","name":"nim-deploy","owner":"NVIDIA","isFork":false,"description":"A collection of YAML files, Helm Charts, Operator code, and guides to act as an example reference implementation for NVIDIA NIM 
deployment.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":7,"starsCount":55,"forksCount":21,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T23:18:25.724Z"}},{"type":"Public","name":"doca-sosreport","owner":"NVIDIA","isFork":true,"description":"A unified tool for collecting system logs and other debug information","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":538,"license":"GNU General Public License v2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T17:19:44.984Z"}},{"type":"Public","name":"spark-rapids-benchmarks","owner":"NVIDIA","isFork":false,"description":"Spark RAPIDS Benchmarks – benchmark sets and utilities for the RAPIDS Accelerator for Apache Spark","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":21,"starsCount":33,"forksCount":26,"license":"Apache License 2.0","participation":[0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,1,2,0,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,1,0,1,1,1,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T15:57:52.501Z"}},{"type":"Public","name":"trt-samples-for-hackathon-cn","owner":"NVIDIA","isFork":false,"description":"Simple samples for TensorRT programming","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":60,"starsCount":1418,"forksCount":335,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T05:16:22.972Z"}},{"type":"Public","name":"NVFlare","owner":"NVIDIA","isFork":false,"description":"NVIDIA Federated Learning Application Runtime Environment","allTopics":["python","decentralized","pet","privacy-protection","federated-learning","federated-analytics","federated-computing"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":15,"issueCount":23,"starsCount":568,"forksCount":156,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T20:11:25.388Z"}},{"type":"Public","name":"modulus-sym","owner":"NVIDIA","isFork":false,"description":"Framework providing pythonic APIs, algorithms and utilities to be used with Modulus core to physics inform model training as well as higher level abstraction for domain experts","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":49,"starsCount":137,"forksCount":56,"license":"Apache License 2.0","participation":[0,3,4,2,2,0,1,1,0,0,3,3,2,0,0,1,1,3,1,3,2,2,0,1,0,0,1,1,3,1,0,1,1,1,3,0,0,0,1,2,1,0,0,0,0,0,0,0,2,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T22:27:36.949Z"}},{"type":"Public","name":"apex","owner":"NVIDIA","isFork":false,"description":"A PyTorch Extension: Tools for easy mixed precision and distributed training in Pytorch","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":73,"issueCount":637,"starsCount":8161,"forksCount":1354,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T09:10:08.579Z"}},{"type":"Public","name":"nim-anywhere","owner":"NVIDIA","isFork":false,"description":"Accelerate your Generative AI with 
NIMs.","allTopics":["nim","nvidia","llama","rag","llm","langchain","genai","lcel","llama3"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T05:25:22.399Z"}},{"type":"Public","name":"NeMo-text-processing","owner":"NVIDIA","isFork":false,"description":"NeMo text processing for ASR and TTS","allTopics":["text-normalization","inverse-text-n"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":4,"starsCount":242,"forksCount":76,"license":"Apache License 2.0","participation":[0,1,0,0,2,2,1,1,3,0,0,1,2,1,0,5,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,2,0,1,0,4,1,0,1,0,1,2,4,0,0,0,0,4,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-03T20:42:45.060Z"}},{"type":"Public","name":"cuda-python","owner":"NVIDIA","isFork":false,"description":"CUDA Python Low-level Bindings","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":811,"forksCount":63,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T21:32:31.052Z"}},{"type":"Public","name":"Megatron-LM","owner":"NVIDIA","isFork":false,"description":"Ongoing research training transformer models at scale","allTopics":["transformers","model-para","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":129,"issueCount":306,"starsCount":9306,"forksCount":2098,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T20:12:48.960Z"}},{"type":"Public","name":"workbench-example-hybrid-rag","owner":"NVIDIA","isFork":false,"description":"An NVIDIA AI Workbench example project for Retrieval Augmented Generation (RAG)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":64,"forksCount":189,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T23:00:08.676Z"}},{"type":"Public","name":"audio-flamingo","owner":"NVIDIA","isFork":false,"description":"PyTorch implementation of Audio Flamingo: A Novel Audio Language Model with Few-Shot Learning and Dialogue Abilities.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":1,"starsCount":85,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T01:42:38.567Z"}},{"type":"Public","name":"swift","owner":"NVIDIA","isFork":true,"description":"OpenStack Storage (Swift). 
Mirror of code maintained at opendev.org.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1059,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T23:05:42.172Z"}},{"type":"Public","name":"numbast","owner":"NVIDIA","isFork":false,"description":"Numbast is a tool to build an automated pipeline that converts CUDA APIs into Numba bindings.","allTopics":["cuda","numba"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":19,"starsCount":14,"forksCount":4,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-26T01:53:30.922Z"}},{"type":"Public","name":"TensorRT-Model-Optimizer","owner":"NVIDIA","isFork":false,"description":"TensorRT Model Optimizer is a unified library of state-of-the-art model optimization techniques such as quantization and sparsity. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed on NVIDIA GPUs.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":15,"starsCount":292,"forksCount":15,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T13:51:23.327Z"}},{"type":"Public","name":"nv-cloud-function-helpers","owner":"NVIDIA","isFork":false,"description":"Functions that simplify common tasks with NVIDIA Cloud Functions","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":0,"starsCount":8,"forksCount":2,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T11:57:10.440Z"}},{"type":"Public","name":"ChatRTX","owner":"NVIDIA","isFork":false,"description":"A developer reference project for creating Retrieval Augmented Generation (RAG) chatbots on Windows using TensorRT-LLM","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":24,"starsCount":2537,"forksCount":287,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-22T02:12:21.738Z"}},{"type":"Public","name":"NeMo-speech-data-processor","owner":"NVIDIA","isFork":false,"description":"A toolkit for processing speech data and creating speech datasets","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":1,"starsCount":65,"forksCount":21,"license":"Apache License 2.0","participation":[2,16,20,0,0,0,0,0,0,0,0,0,0,0,0,1,1,20,1,2,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,5,0,0,1,0,1,2,2,1,0,1,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T09:45:16.084Z"}}],"repositoryCount":112,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"NVIDIA repositories"}
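To give a flavor of the GPU programming model behind the warp entry above, here is a minimal kernel sketch in the style of the project's documented `@wp.kernel` / `wp.launch` API; the kernel body, array sizes, and data are illustrative, not taken from the repository.

```python
import numpy as np
import warp as wp

wp.init()  # initialize Warp (selects CUDA if available, otherwise CPU)

@wp.kernel
def point_length(points: wp.array(dtype=wp.vec3),
                 lengths: wp.array(dtype=float)):
    tid = wp.tid()                         # index of the current thread
    lengths[tid] = wp.length(points[tid])  # Euclidean norm of one point

n = 1024
points = wp.array(np.random.rand(n, 3), dtype=wp.vec3)  # host -> device copy
lengths = wp.zeros(n, dtype=float)

wp.launch(kernel=point_length, dim=n, inputs=[points, lengths])
print(lengths.numpy()[:4])  # copy back and show the first few results
```

Kernels are written in a typed subset of Python and JIT-compiled, which is what lets the same source run on both the CUDA and CPU back ends.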
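The numba-cuda row above carries no description; the repository develops Numba's CUDA target, which is normally reached through the standard `numba.cuda` entry point. Assuming that API, a minimal element-wise kernel looks like this (array contents and launch shape are illustrative):

```python
import numpy as np
from numba import cuda  # CUDA target; development lives in NVIDIA/numba-cuda

@cuda.jit
def add_one(x):
    i = cuda.grid(1)   # flattened global thread index
    if i < x.size:     # guard: the grid may be larger than the array
        x[i] += 1.0

x = cuda.to_device(np.zeros(1024, dtype=np.float32))
threads = 256
blocks = (x.size + threads - 1) // threads  # ceil-divide to cover every element
add_one[blocks, threads](x)                 # launch with [blocks, threads] syntax
print(x.copy_to_host()[:4])                 # -> [1. 1. 1. 1.]
```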