Skip to content

Instantly share code, notes, and snippets.

View igmeMarcial's full-sized avatar
🎯
Focusing

Marcial Igme igmeMarcial

🎯
Focusing
View GitHub Profile
@igmeMarcial
igmeMarcial / hf_mcp.py
Created May 7, 2025 23:13 — forked from Wauplin/hf_mcp.py
Python-based HF MCP server
"""
WARNING: This is an experimental implementation. Expect rough edges while using it.
-------------------------------------------------
Defines a FastMCP server that exposes the Hugging Face Hub API as a set of tools.
In practice, all public methods from `HfApi` are exposed as tools, except for the ones dealing with files:
- `create_commit`
- `hf_hub_download`
- `preupload_lfs_files`
import os
from google import genai
from google.genai import types
client = genai.Client(api_key=os.getenv("GEMINI_API_KEY","xxx"))
# Replace with the YouTube URL you want to analyze
youtube_url = "https://www.youtube.com/watch?v=RDOMKIw1aF4"
# Prompt to analyze and summarize the YouTube video
@igmeMarcial
igmeMarcial / get_memory_size.py
Last active May 7, 2025 23:17 — forked from philschmid/get_memory_size.py
Get needed GPU per precision for a Hugging Face Model Id
from typing import Dict, Union
from huggingface_hub import get_safetensors_metadata
import argparse
import sys
# Example:
# python get_gpu_memory.py Qwen/Qwen2.5-7B-Instruct
# Dictionary mapping dtype strings to their byte sizes
bytes_per_dtype: Dict[str, float] = {
@igmeMarcial
igmeMarcial / grpo_demo.py
Created February 1, 2025 00:43 — forked from willccbb/grpo_demo.py
GRPO Llama-1B
# train_grpo.py
import re
import torch
from datasets import load_dataset, Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig
from trl import GRPOConfig, GRPOTrainer
# Load and prep dataset