diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c494910..d682fba 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -38,10 +38,12 @@ Fixes #(issue number) ## 🧪 Testing -- [ ] Ran `uv run pytest` -- [ ] Tested intelligent scraper with: `scapo scrape run --sources [source] --limit 5` -- [ ] Verified LLM processing worked correctly -- [ ] Checked that only AI/ML content was processed +- [ ] Tested service discovery: `uv run scapo scrape discover --update` +- [ ] Tested targeted scraping: `uv run scapo scrape targeted --service "[service]" --limit 10` +- [ ] Tested batch processing: `uv run scapo scrape batch --category [category] --limit 10 --batch-size 2` +- [ ] Verified LLM extraction quality (checked generated files in `models/`) +- [ ] Tested TUI explorer: `uv run scapo tui` +- [ ] For OpenRouter users: Updated context cache with `uv run scapo update-context` - [ ] Other testing (please describe): ## 📸 Screenshots (if applicable) diff --git a/QUICKSTART.md b/QUICKSTART.md index 12cffa9..474863e 100644 --- a/QUICKSTART.md +++ b/QUICKSTART.md @@ -23,6 +23,9 @@ cp .env.example .env LLM_PROVIDER=openrouter OPENROUTER_API_KEY=sk-or-v1-your-key-here # Get from openrouter.ai OPENROUTER_MODEL=your_model + +# IMPORTANT: Update model context cache for accurate batching +scapo update-context # Without this, defaults to 4096 tokens (poor performance!) ``` #### Option B: Ollama (Local) @@ -117,27 +120,27 @@ scapo models search "copilot" # Search for specific models cat models/audio/eleven-labs/cost_optimization.md ``` -### 5. (Optional) Use with Claude Desktop +### 5. (Optional) MCP Server - Query Your Extracted Tips + +**Important:** The MCP server only reads tips you've already extracted. Run scrapers first (Steps 3-4) to populate models/ folder! 
-Add SCAPO as an MCP server to query your extracted tips (from models/ folder) directly in Claude: +Add SCAPO as an MCP server to query your extracted tips directly in MCP-compatible clients: ```json -// Add to claude_desktop_config.json +// Add to config.json { "mcpServers": { "scapo": { "command": "npx", - "args": ["@scapo/mcp-server"], + "args": ["@arahangua/scapo-mcp-server"], "env": { - "SCAPO_MODELS_PATH": "path/to/scapo/models" + "SCAPO_MODELS_PATH": "/path/to/scapo/models" } } } } ``` -Then ask Claude: "Get best practices for Midjourney" - no Python needed! - ## 📊 Understanding the Output SCAPO creates organized documentation: @@ -151,6 +154,19 @@ models/ │ └── parameters.json # Recommended settings ``` +## ⚙️ Utility Commands + +```bash +# Update OpenRouter model context cache (for accurate batching) +scapo update-context # Updates if >24h old +scapo update-context -f # Force update + +# View extracted tips +scapo tui # Interactive TUI explorer +scapo models list # List all extracted models +scapo models search "copilot" # Search for specific models +``` + ## ⚙️ The --limit flag ```bash diff --git a/README.md b/README.md index 2506f5b..e382f31 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,13 @@ cp .env.example .env Get your API key from [openrouter.ai](https://openrouter.ai/) * you can also use local LLMs (Ollama, LMstudio). Check [QUICKSTART.md](./QUICKSTART.md) +#### Important: Update Model Context Cache (OpenRouter users) +```bash +# REQUIRED for optimal performance - fetches accurate token limits +scapo update-context # Creates cache for faster processing +``` +Without this, SCAPO defaults to 4096 tokens (severely limiting batch efficiency) + ### 3. Start Extracting Optimization Tips @@ -274,18 +281,17 @@ MAX_POSTS_PER_SCRAPE=100 # Limit per source ``` Hand-wavy breakdown: With 5 posts, extraction success ~20%. With 20+ posts, success jumps to ~80%. 
-## 🤖 MCP Server for Claude Desktop +## 🤖 MCP Server (Optional Reader) -Query your extracted tips directly in Claude (reads from models/ folder - run scrapers first!): +**Note:** The MCP server is a reader that queries your already-extracted tips. You must run SCAPO scrapers first to populate the models/ folder! ```json -// Add to %APPDATA%\Claude\claude_desktop_config.json (Windows) -// or ~/Library/Application Support/Claude/claude_desktop_config.json (macOS) +// Add to your client's mcp config.json { "mcpServers": { "scapo": { "command": "npx", - "args": ["@scapo/mcp-server"], + "args": ["@arahangua/scapo-mcp-server"], "env": { "SCAPO_MODELS_PATH": "C:\\path\\to\\scapo\\models" // Your models folder } @@ -293,9 +299,6 @@ Query your extracted tips directly in Claude (reads from models/ folder - run sc } } ``` - -Then ask Claude: "Get me best practices for GitHub Copilot" or "What models are good for coding?" - See [mcp/README.md](mcp/README.md) for full setup and available commands. ## 🎨 Interactive TUI @@ -378,7 +381,12 @@ Built as part of the CZero Engine project to improve AI application development. 
- [OpenRouter](https://openrouter.ai/) for accessible AI APIs - Coffee ☕ for making this possible - [Ollama](https://ollama.com/) and [LMstudio](https://lmstudio.ai/) for awesome local LLM experience -- [Awesome Generative AI](https://github.com/steven2358/awesome-generative-ai) & [Awesome AI Tools](https://github.com/mahseema/awesome-ai-tools) for service discovery +- Service discovery powered by awesome lists: + - [steven2358/awesome-generative-ai](https://github.com/steven2358/awesome-generative-ai) + - [mahseema/awesome-ai-tools](https://github.com/mahseema/awesome-ai-tools) + - [filipecalegario/awesome-generative-ai](https://github.com/filipecalegario/awesome-generative-ai) + - [aishwaryanr/awesome-generative-ai-guide](https://github.com/aishwaryanr/awesome-generative-ai-guide) + - [eudk/awesome-ai-tools](https://github.com/eudk/awesome-ai-tools) - All opensource contributors in this space --- diff --git a/mcp/README.md b/mcp/README.md index 7064107..7c24046 100644 --- a/mcp/README.md +++ b/mcp/README.md @@ -1,62 +1,76 @@ # SCAPO MCP Server -A Model Context Protocol (MCP) server for querying AI/ML best practices from the SCAPO (Stay Calm and Prompt On) knowledge base. Features intelligent fuzzy matching for improved user experience when querying model information. +A Model Context Protocol (MCP) server that makes your locally-extracted SCAPO knowledge base queryable. + +⚠️ **This is a reader, not a scraper!** You must first use [SCAPO](https://github.com/czero-cc/scapo) to extract tips into your `models/` folder. ## Documentation For comprehensive usage instructions, examples, and technical details, please see the **[Usage Guide](usage-guide.md)**. -## Installation +## Prerequisites + +1. **Clone and set up SCAPO first**: + ```bash + git clone https://github.com/czero-cc/scapo.git + cd scapo + # Follow SCAPO setup to run scrapers and populate models/ + ``` + +2. 
**Required**: + - Node.js 18+ + - npm or npx + - Populated `models/` directory (from running SCAPO scrapers) + +## How It Works + +**IMPORTANT**: This MCP server ONLY reads from your local `models/` folder. It does NOT scrape data itself! + +1. First, use SCAPO to scrape and extract tips into `models/` +2. Then, this MCP server makes those tips queryable in your AI client -You can use this MCP server directly with `npx` (no Python required): +## Quick Start ```bash -npx @scapo/mcp-server +# Step 1: Set up SCAPO and extract tips +git clone https://github.com/czero-cc/scapo.git +cd scapo +# Follow SCAPO README to configure and run scrapers +scapo scrape targeted --service "GitHub Copilot" --limit 20 + +# Step 2: Configure MCP to read your extracted tips +# Add to your MCP client config with YOUR path to scapo/models/ ``` -Or install it globally: +## Installation ```bash -npm install -g @scapo/mcp-server +npx @arahangua/scapo-mcp-server ``` -## Usage with Claude Desktop - -Add this to your Claude Desktop configuration file: +## Configuration for MCP Clients -### Windows -Edit `%APPDATA%\Claude\claude_desktop_config.json`: +Add this to your MCP client's configuration: ```json { "mcpServers": { "scapo": { "command": "npx", - "args": ["@scapo/mcp-server"], + "args": ["@arahangua/scapo-mcp-server"], "env": { - "SCAPO_MODELS_PATH": "C:\\path\\to\\scapo\\models" + "SCAPO_MODELS_PATH": "/absolute/path/to/your/scapo/models" // From your cloned SCAPO repo! } } } } ``` -### macOS -Edit `~/Library/Application Support/Claude/claude_desktop_config.json`: +**Note:** Set `SCAPO_MODELS_PATH` to the absolute path of your SCAPO models directory. 
-```json -{ - "mcpServers": { - "scapo": { - "command": "npx", - "args": ["@scapo/mcp-server"], - "env": { - "SCAPO_MODELS_PATH": "/path/to/scapo/models" - } - } - } -} -``` +For Claude Desktop specifically: +- Windows: Edit `%APPDATA%\Claude\claude_desktop_config.json` +- macOS: Edit `~/Library/Application Support/Claude/claude_desktop_config.json` ## Available Tools @@ -89,7 +103,7 @@ List all available models by category. ``` Arguments: -- category: Model category ("text", "image", "video", "audio", "multimodal", "all") +- category: Model category ("text", "image", "video", "audio", "multimodal", "code", "all") ``` Example in Claude: @@ -114,12 +128,14 @@ Example in Claude: ## Features - **Intelligent Fuzzy Matching**: Handles typos, partial names, and variations automatically -- **No Python Required**: Pure Node.js implementation using npx + - Typo tolerance: `heygen` → "HeyGen", `gemeni` → "Gemini" + - Partial matching: `qwen` → finds all Qwen variants + - Case insensitive: `LLAMA-3` → "llama-3" - **Fully Standalone**: Works without any API server running - **Direct File Access**: Reads from local model files - **Smart Search**: Advanced search with similarity scoring - **Smart Recommendations**: Suggests models based on use case -- **Easy Integration**: Works seamlessly with Claude Desktop +- **Easy Integration**: Works with any MCP-compatible client - **Helpful Suggestions**: Provides alternatives when exact matches aren't found ## Use Cases @@ -150,24 +166,13 @@ models/ └── ... ``` -## Advantages Over Python Version - -1. **No Python Setup**: Works with just Node.js (which Claude Desktop already has) -2. **Simple npx Usage**: One command to run, no installation needed -3. **Better IDE Integration**: Works seamlessly with Cursor and other IDEs -4. **Faster Startup**: Node.js starts faster than Python -5. 
**Native JSON Handling**: Better performance for JSON operations - ## Contributing -To publish updates to npm: - -```bash -cd mcp -npm version patch # or minor/major -npm publish --access public -``` +To contribute improvements: +1. Fork the [SCAPO repository](https://github.com/czero-cc/SCAPO) +2. Make your changes in the `mcp/` directory +3. Submit a pull request ## License -Same as the parent SCAPO repository. \ No newline at end of file +Same as the parent [SCAPO](https://github.com/czero-cc/SCAPO) repository. \ No newline at end of file diff --git a/mcp/package-lock.json b/mcp/package-lock.json index 4e6ecad..4574a7c 100644 --- a/mcp/package-lock.json +++ b/mcp/package-lock.json @@ -1,12 +1,12 @@ { "name": "@scapo/mcp-server", - "version": "1.0.0", + "version": "1.0.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@scapo/mcp-server", - "version": "1.0.0", + "version": "1.0.1", "license": "MIT", "dependencies": { "@modelcontextprotocol/sdk": "^0.5.0", diff --git a/mcp/package.json b/mcp/package.json index 81172a0..69cfaff 100644 --- a/mcp/package.json +++ b/mcp/package.json @@ -1,6 +1,6 @@ { - "name": "@scapo/mcp-server", - "version": "1.0.0", + "name": "@arahangua/scapo-mcp-server", + "version": "1.0.4", "description": "Stay Calm and Prompt On (SCAPO) - MCP server for AI/ML best practices", "main": "index.js", "type": "module", @@ -27,4 +27,4 @@ "engines": { "node": ">=18.0.0" } -} \ No newline at end of file +} diff --git a/mcp/usage-guide.md b/mcp/usage-guide.md index d6c66fc..049f947 100644 --- a/mcp/usage-guide.md +++ b/mcp/usage-guide.md @@ -13,15 +13,11 @@ The SCAPO MCP (Model Context Protocol) server provides intelligent access to AI/ ### Installation ```bash -# Clone the repository -git clone https://github.com/your-org/scapo.git -cd scapo/mcp +# After installing SCAPO, the MCP server can be used directly +npx @arahangua/scapo-mcp-server -# Install dependencies -npm install - -# Or run directly with npx (no installation needed) 
-npx @scapo/mcp-server +# Or install globally +npm install -g @arahangua/scapo-mcp-server ``` ### Basic Setup @@ -43,8 +39,8 @@ npm start { "mcpServers": { "scapo": { - "command": "node", - "args": ["/path/to/scapo/mcp/index.js"], + "command": "npx", + "args": ["@arahangua/scapo-mcp-server"], "env": { "SCAPO_MODELS_PATH": "/path/to/scapo/models" } diff --git a/models/code/coderabbit/metadata.json b/models/code/coderabbit/metadata.json new file mode 100644 index 0000000..a90fa1e --- /dev/null +++ b/models/code/coderabbit/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "CodeRabbit", + "category": "code", + "last_updated": "2025-08-16T18:29:24.327526", + "extraction_timestamp": "2025-08-16T18:10:37.657193", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 104, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/coderabbit/prompting.md b/models/code/coderabbit/prompting.md new file mode 100644 index 0000000..6be9c0e --- /dev/null +++ b/models/code/coderabbit/prompting.md @@ -0,0 +1,13 @@ +# CodeRabbit Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Read more here: https://coderabbit.ai/blog/coderabbit-openai-rate-limits +- This blog discusses how CodeRabbit uses FluxNinja Aperture to manage OpenAI’s limits and ensure optimal operation even during peak load. 
+ +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/code/flexapp/metadata.json b/models/code/flexapp/metadata.json new file mode 100644 index 0000000..66c1ed9 --- /dev/null +++ b/models/code/flexapp/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "FlexApp", + "category": "code", + "last_updated": "2025-08-16T18:29:24.858720", + "extraction_timestamp": "2025-08-16T18:15:53.143001", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 128, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/flexapp/pitfalls.md b/models/code/flexapp/pitfalls.md new file mode 100644 index 0000000..f7d8e01 --- /dev/null +++ b/models/code/flexapp/pitfalls.md @@ -0,0 +1,9 @@ +# FlexApp - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Policy & Account Issues + +### ⚠️ on the Flex app it says 'Resolve other account, this may take 24 hours' +**Note**: Be aware of terms of service regarding account creation. + diff --git a/models/code/flexapp/prompting.md b/models/code/flexapp/prompting.md new file mode 100644 index 0000000..0c511b6 --- /dev/null +++ b/models/code/flexapp/prompting.md @@ -0,0 +1,20 @@ +# FlexApp Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Resolve package itinerary issue by contacting station manager to remove package from itinerary. +- Just redownloaded it to check and it works now. False alarm lol +- Use airplane mode to solve lag in Flex app. +- When I wanted to build the FlexApp, I built the initial working prototype within 4 days and launched on Twitter for waitlist. +- Avoid using Stride app while scanning packages to prevent performance degradation. +- So now Flex wants us to update our vehicle fuel type to ensure we receive offers best suited for our vehicles. (Gasoline, diesel, hybrid, and electric) +- I had to call support 5 times to have them mark packages as delivered. 
+- Flex 3 beta includes a Trending tab showing most popular downloads of last 24 hours. +- Download Flex 3 beta from http://getdelta.co. + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/code/kiln/cost_optimization.md b/models/code/kiln/cost_optimization.md new file mode 100644 index 0000000..83c4c5c --- /dev/null +++ b/models/code/kiln/cost_optimization.md @@ -0,0 +1,9 @@ +# Kiln - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Sadly if i want to set Ethereum to the Staking Pool it needs a fee of 0,038 (!) ethereum which is a lot. +- fee of 0,038 ethereum + diff --git a/models/code/kiln/metadata.json b/models/code/kiln/metadata.json new file mode 100644 index 0000000..e181edb --- /dev/null +++ b/models/code/kiln/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Kiln", + "category": "code", + "last_updated": "2025-08-16T18:29:24.579659", + "extraction_timestamp": "2025-08-16T18:12:59.239428", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 358, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/kiln/parameters.json b/models/code/kiln/parameters.json new file mode 100644 index 0000000..e9cdbf4 --- /dev/null +++ b/models/code/kiln/parameters.json @@ -0,0 +1,13 @@ +{ + "service": "Kiln", + "last_updated": "2025-08-16T18:29:24.454944", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Sadly if i want to set Ethereum to the Staking Pool it needs a fee of 0,038 (!) 
ethereum which is a lot.", + "tip_1": "fee of 0,038 ethereum" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/code/kiln/pitfalls.md b/models/code/kiln/pitfalls.md new file mode 100644 index 0000000..3f9d9d3 --- /dev/null +++ b/models/code/kiln/pitfalls.md @@ -0,0 +1,14 @@ +# Kiln - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ Kiln and hunting cabin reduce each other's cultivation area; bug with forest density; no ETA on fix. + +### ⚠️ Kilns overlapping and not working, clipped orchards, etc due to forest density bug. + +## Cost & Limits + +### 💰 I can't figure out how to get out of "SIMULATION MODE" in this [GitHub - jbruce12000/kiln-controller: Turns a Raspberry Pi into an inexpensive, web-enabled kiln controller.](https://github.com/jbruce12000/kiln-controller) I have tried every which way. I know I'm probably skipping a step, or misunderstanding a step. Everything has been wired correctly and tested. Has anyone but stuck at this step too? u/jbruce12000 I need help.... + diff --git a/models/code/kiln/prompting.md b/models/code/kiln/prompting.md new file mode 100644 index 0000000..fd58185 --- /dev/null +++ b/models/code/kiln/prompting.md @@ -0,0 +1,18 @@ +# Kiln Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Auto-compounding is a key feature i don't want to miss. +- After updating Ubuntu and rebooting, Kiln can get stuck with the message: 'Gathering baker data...Some information will be temporarily unavailable as Kiln gathers information from the blockchain. This can take up to a few hours. Kiln will gather new data about this baker in the background at the ...' and may take up to a few hours to complete. +- Topic-trees: generate a nested topic tree to build content breadth. +- Great UI: our one-click apps for Mac and Windows provide a really nice UX for synthetic data generation. 
+- I was looking forward to stake pooled on Kiln via Ledger Live since i like that it is auto-compounding which is a key feature i don't want to miss. +- Besides of this it matched the most what i was looking for since the APY is good and as said its compounding. +- Stake pooled on Kiln via Ledger Live. + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/code/kilo-code/cost_optimization.md b/models/code/kilo-code/cost_optimization.md new file mode 100644 index 0000000..4e4d03a --- /dev/null +++ b/models/code/kilo-code/cost_optimization.md @@ -0,0 +1,23 @@ +# Kilo Code - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Kilo Code now supports OpenAI's new open-source models: GPT OSS 20B (131k context window) and GPT OSS 120B (151k context window). The 120B version charges $0.15/M for input tokens and $0.60/M for output tokens. +- Openrouter doc states that purchasing at least 10 credits increases daily limit to 1000 :free model requests per day. +- $20 subscription gives ~ $40 worth usage, 1000 o3 requests +- Kilo just broke 1 trillion tokens/month +- After three calls to Claude 4 Sonnet in Kilo Code, $1.50 was used, indicating high cost per request. +- Kilo Code covers all costs for premium models during the workshop, allowing use of Claude Opus 4, Gemini 2.5 Pro, GPT-4.1, and other premium models completely free of charge. +- No monthly minimums or hidden fees +- 300% bonus credits on the next 500 top ups + +## Money-Saving Tips + +- Flexible Credit Management: Control exactly when your balance reloads from your payment method—no monthly minimums or hidden fees +- Use OpenRouter to set up Claude 4 in Kilo Code to avoid rate limiting +- If you purchase at least 10 credits on Openrouter, your daily limit is increased to 1000 :free model requests per day, which applies to Kilo Code. 
+- Use Openrouter API with at least $10 credits to increase daily limit to 1000 :free model requests per day for Kilo Code. +- Set up Claude 4 through Openrouter to avoid immediate rate limiting in Kilo Code. + diff --git a/models/code/kilo-code/metadata.json b/models/code/kilo-code/metadata.json new file mode 100644 index 0000000..4392cc5 --- /dev/null +++ b/models/code/kilo-code/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Kilo Code", + "category": "code", + "last_updated": "2025-08-16T18:29:25.127674", + "extraction_timestamp": "2025-08-16T18:18:16.158336", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 138, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/kilo-code/parameters.json b/models/code/kilo-code/parameters.json new file mode 100644 index 0000000..b7a9605 --- /dev/null +++ b/models/code/kilo-code/parameters.json @@ -0,0 +1,42 @@ +{ + "service": "Kilo Code", + "last_updated": "2025-08-16T18:29:24.997151", + "recommended_settings": { + "setting_0": { + "description": "profile=Nano" + }, + "setting_1": { + "description": "model=GPT-4.1-Nano" + }, + "setting_2": { + "description": "global_shortcut=Cmd+Shift+A (Mac) or Ctrl+Shift+A (Windows)" + }, + "setting_3": { + "description": "indexer=built-in" + }, + "setting_4": { + "description": "llama.cpp=server_mode" + }, + "setting_5": { + "description": "embedder=nomic-embed-code" + }, + "setting_6": { + "description": "vector_db=Qdrant_Docker" + }, + "setting_7": { + "description": "api_key_source=build.nvidia.com" + } + }, + "cost_optimization": { + "pricing": "After three calls to Claude 4 Sonnet in Kilo Code, $1.50 was used, indicating high cost per request.", + "tip_1": "Openrouter doc states that purchasing at least 10 credits increases daily limit to 1000 :free model requests per day.", + "tip_2": "Kilo just broke 1 trillion tokens/month", + "tip_3": "Kilo Code covers all costs for premium models during the 
workshop, allowing use of Claude Opus 4, Gemini 2.5 Pro, GPT-4.1, and other premium models completely free of charge.", + "tip_4": "No monthly minimums or hidden fees", + "tip_5": "300% bonus credits on the next 500 top ups" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/code/kilo-code/pitfalls.md b/models/code/kilo-code/pitfalls.md new file mode 100644 index 0000000..76e50f3 --- /dev/null +++ b/models/code/kilo-code/pitfalls.md @@ -0,0 +1,32 @@ +# Kilo Code - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ I am trying to Kilo code with its api, I just load money in it but I cannot use it properly, it only used 25.2k contenct lenght but always trow and too large error. I do not included even a picture because apperantly picture causes a bigger problems. Please fix this or help me if I am doing something wrong. + +### ⚠️ API Request Failed error 9 when using Claude 3.7 in Kilo Code. + +### ⚠️ I was using rooCode, cline and KiloCode . I put all my api keys but it was not working. After 2 prompts it will leave a message of 401 error dont know why? I was even using free models help me please +**Fix**: Store API keys in environment variables or use a secrets manager. + +### ⚠️ Terminal empty command bugs + +### ⚠️ Hi. Having problem with kilo code. Here the error : + +Requested token count exceeds the model's maximum context length of 98304 tokens. You requested a total of 104096 tokens: 71328 tokens from the input messages and 32768 tokens for the completion. Please reduce the number of tokens in the input messages or the completion to fit within the limit. + + +I handling large project . I already try to only allow 500text per read to reduce input token. But somehow got problem with output token. How to ma + +## Cost & Limits + +### 💰 Rate limited when using Claude 4 in Kilo Code + +### 💰 429 Rate limit encountered when using free LLM in Kilo Code. 
+ +### 💰 Rate limits of 10 requests per minute when using Gemini 2.5 Flash in Kilo Code. + +### 💰 Openrouter doc states that purchasing at least 10 credits increases daily limit to 1000 :free model requests per day. + diff --git a/models/code/kilo-code/prompting.md b/models/code/kilo-code/prompting.md new file mode 100644 index 0000000..13e609f --- /dev/null +++ b/models/code/kilo-code/prompting.md @@ -0,0 +1,47 @@ +# Kilo Code Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Get Your API Key: Visit https://build.nvidia.com/settings/api-keys to generate you +- Activate Kilo Code from anywhere with Cmd+Shift+A (Mac) or Ctrl+Shift+A (Windows) +- Flexible Credit Management: Control exactly when your balance reloads from your payment method—no monthly minimums or hidden fees +- Run llama.cpp in server mode (OpenAI-compatible API) for local inference +- Deploy Qdrant in Docker as the vector DB with cosine similarity +- I pay $20 and I get ~ $40 worth of AI usage (1000 o3 requests) +- Qdrant (Docker) as the vector DB (cosine) +- Use custom keyboard shortcuts for accepting suggestions +- Create a new config profile called 'Nano' that uses GPT-4.1-Nano instead of Claude 3.7 Sonnet to speed up the Enhance Prompt feature +- Use OpenRouter to set up Claude 4 in Kilo Code to avoid rate limiting +- If you purchase at least 10 credits on Openrouter, your daily limit is increased to 1000 :free model requests per day, which applies to Kilo Code. +- Kilo Code with built-in indexer +- Use Cmd+I for quick inline tasks directly in your editor - select code, describe what you want, get AI suggestions without breaking flow +- Configure the Enhance Prompt feature to use a different model (e.g., GPT-4.1-Nano) than your main coding tasks +- Use the MCP Marketplace to install AI capabilities with a single click +- When using Claude 4 in Kilo Code, note that the :thinking variety is not selectable. 
+- Use Kilo Code's built-in indexer for local-first codebase indexing +- Use Cmd+L: "Let Kilo Decide" - AI automatically suggests obvious improvements based on context +- llama.cpp in server mode (OpenAI-compatible API) +- Use nomic-embed-code (GGUF, Q6_K_L) as the embedder for 3,584-dim embeddings +- Enable system notifications to never miss approval requests even when the editor is minimized +- Use Openrouter API with at least $10 credits to increase daily limit to 1000 :free model requests per day for Kilo Code. +- nomic-embed-code (GGUF, Q6_K_L) as the embedder (3,584-dim) +- Local-first codebase indexing can be achieved by using Kilo Code with built-in indexer, llama.cpp server mode, nomic-embed-code, and Qdrant Docker. +- Set up Claude 4 through Openrouter to avoid immediate rate limiting in Kilo Code. + +## Recommended Settings + +- profile=Nano +- model=GPT-4.1-Nano +- global_shortcut=Cmd+Shift+A (Mac) or Ctrl+Shift+A (Windows) +- indexer=built-in +- llama.cpp=server_mode +- embedder=nomic-embed-code +- vector_db=Qdrant_Docker +- api_key_source=build.nvidia.com + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/code/mindstudio/metadata.json b/models/code/mindstudio/metadata.json new file mode 100644 index 0000000..2027a56 --- /dev/null +++ b/models/code/mindstudio/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "MindStudio", + "category": "code", + "last_updated": "2025-08-16T18:29:25.398108", + "extraction_timestamp": "2025-08-16T18:22:32.004848", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 72, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/mindstudio/pitfalls.md b/models/code/mindstudio/pitfalls.md new file mode 100644 index 0000000..b70a63a --- /dev/null +++ b/models/code/mindstudio/pitfalls.md @@ -0,0 +1,6 @@ +# MindStudio - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +*No major issues reported yet. 
This may indicate limited community data.* + diff --git a/models/code/openai-codex/cost_optimization.md b/models/code/openai-codex/cost_optimization.md new file mode 100644 index 0000000..d8deeec --- /dev/null +++ b/models/code/openai-codex/cost_optimization.md @@ -0,0 +1,14 @@ +# OpenAI Codex - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- One-time/USD +- $200 plan on OpenAI Codex +- included in a Pro subscription with no visible rate limits +- pushed past $200 worth of usage in a day, running multiple coding tasks in parallel without slowdown +- pushed past $200 worth of usage in a day +- effectively becomes free for targeted tasks that would normally rack up API fees +- Included in a Pro subscription with no visible rate limits + diff --git a/models/code/openai-codex/metadata.json b/models/code/openai-codex/metadata.json new file mode 100644 index 0000000..1f433e5 --- /dev/null +++ b/models/code/openai-codex/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "OpenAI Codex", + "category": "code", + "last_updated": "2025-08-16T18:29:23.810555", + "extraction_timestamp": null, + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 172, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/openai-codex/parameters.json b/models/code/openai-codex/parameters.json new file mode 100644 index 0000000..1a580a6 --- /dev/null +++ b/models/code/openai-codex/parameters.json @@ -0,0 +1,44 @@ +{ + "service": "OpenAI Codex", + "last_updated": "2025-08-16T18:29:23.687512", + "recommended_settings": { + "setting_0": { + "description": "model=your-kobold-model" + }, + "setting_1": { + "description": "provider=kobold" + }, + "setting_2": { + "description": "providers.kobold.name=Kobold" + }, + "setting_3": { + "description": "providers.kobold.baseURL=http://localhost:5001/v1" + }, + "setting_4": { + "description": "providers.kobold.envKey=KOBOLD_API_KEY" + }, + 
"setting_5": { + "description": "config_path=~/.codex/config.json" + }, + "setting_6": { + "description": "provider=ollama" + }, + "setting_7": { + "description": "model=deepseek-r1:1.5b" + }, + "setting_8": { + "description": "command=codex -p ollama -m deepseek-r1:1.5b" + } + }, + "cost_optimization": { + "tip_0": "One-time/USD", + "pricing": "pushed past $200 worth of usage in a day", + "tip_2": "included in a Pro subscription with no visible rate limits", + "tip_3": "effectively becomes free for targeted tasks that would normally rack up API fees", + "tip_4": "Included in a Pro subscription with no visible rate limits" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/code/openai-codex/pitfalls.md b/models/code/openai-codex/pitfalls.md new file mode 100644 index 0000000..6fe49e3 --- /dev/null +++ b/models/code/openai-codex/pitfalls.md @@ -0,0 +1,20 @@ +# OpenAI Codex - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ OpenAI Codex error: 'Your input exceeds the context window of this model. Please adjust your input and try again' + +## Cost & Limits + +### 💰 OpenAI Codex CLI usage limits reset every 5h and every week + +### 💰 You've hit usage your usage limit. Limits reset every 5h and every week. 
+ +### 💰 Pro tier hits the weekly limit after just a couple days of single agent use + +### 💰 included in a Pro subscription with no visible rate limits + +### 💰 Included in a Pro subscription with no visible rate limits + diff --git a/models/code/openai-codex/prompting.md b/models/code/openai-codex/prompting.md new file mode 100644 index 0000000..3b08606 --- /dev/null +++ b/models/code/openai-codex/prompting.md @@ -0,0 +1,28 @@ +# OpenAI Codex Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- The codex CLI agent supports other providers and does not require OpenAI account settings, tokens, or registration cookie calls +- Use codex CLI with an OpenAI Plus/Pro subscription to access command-line GPT-5 without per-token billing. +- The clickable "Suggested task → Start task" buttons appear when you’re in a Codex Ask conversation that (a) is connected to a repository sandbox, and (b) ... +- Use the codex CLI agent with the --config option to set the model name and local Ollama port, e.g., codex --config model=ollama-model port=11434 +- clickable 'Suggested task → Start task' buttons appear when you're in a Codex Ask conversation that is connected to a repository sandbox + +## Recommended Settings + +- model=your-kobold-model +- provider=kobold +- providers.kobold.name=Kobold +- providers.kobold.baseURL=http://localhost:5001/v1 +- providers.kobold.envKey=KOBOLD_API_KEY +- config_path=~/.codex/config.json +- provider=ollama +- model=deepseek-r1:1.5b +- command=codex -p ollama -m deepseek-r1:1.5b + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/code/wordware/cost_optimization.md b/models/code/wordware/cost_optimization.md new file mode 100644 index 0000000..5c418ea --- /dev/null +++ b/models/code/wordware/cost_optimization.md @@ -0,0 +1,8 @@ +# Wordware - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Free/USD + diff --git 
a/models/code/wordware/metadata.json b/models/code/wordware/metadata.json new file mode 100644 index 0000000..f43997f --- /dev/null +++ b/models/code/wordware/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Wordware", + "category": "code", + "last_updated": "2025-08-16T18:29:24.061206", + "extraction_timestamp": "2025-08-16T18:09:34.011851", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 53, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/code/wordware/parameters.json b/models/code/wordware/parameters.json new file mode 100644 index 0000000..4b426f7 --- /dev/null +++ b/models/code/wordware/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "Wordware", + "last_updated": "2025-08-16T18:29:23.935884", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Free/USD" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/general/all/metadata.json b/models/general/all/metadata.json deleted file mode 100644 index d5e0485..0000000 --- a/models/general/all/metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "service": "All", - "category": "general", - "last_updated": "2025-08-14T19:35:32.580911", - "extraction_timestamp": "2025-08-14T19:35:32.580911", - "data_sources": [ - "Reddit API", - "Community discussions" - ], - "posts_analyzed": 0, - "confidence": "medium", - "version": "1.0.0" -} \ No newline at end of file diff --git a/models/general/all/prompting.md b/models/general/all/prompting.md deleted file mode 100644 index bf20f21..0000000 --- a/models/general/all/prompting.md +++ /dev/null @@ -1,20 +0,0 @@ -# All Prompting Guide - -*Last updated: 2025-08-14* - -## Tips & Techniques - -- Set temperature to 0.7 - temperature: 0.7 -- Set top_p to 0.95 - top_p: 0.95 -- Set steps to 50 - steps: 50 -- Set cfg to 7 - cfg: 7 -- Use a system prompt - include a system prompt to guide behavior -- Apply chain of thought - prompt 
the model to reason step-by-step -- Provide few-shot examples - include a few example inputs and outputs in the prompt -- Use streaming for slow responses - enable streaming mode to receive partial outputs -- Batch requests to reduce cost - send multiple prompts in a single API call - -## Sources - -- Reddit community discussions -- User-reported experiences diff --git a/models/general/github-pages/metadata.json b/models/general/github-pages/metadata.json deleted file mode 100644 index b6fd2ca..0000000 --- a/models/general/github-pages/metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "service": "GitHub Pages", - "category": "general", - "last_updated": "2025-08-14T19:41:56.878095", - "extraction_timestamp": "2025-08-14T19:41:56.878095", - "data_sources": [ - "Reddit API", - "Community discussions" - ], - "posts_analyzed": 0, - "confidence": "medium", - "version": "1.0.0" -} \ No newline at end of file diff --git a/models/general/github-pages/prompting.md b/models/general/github-pages/prompting.md deleted file mode 100644 index 36c7278..0000000 --- a/models/general/github-pages/prompting.md +++ /dev/null @@ -1,12 +0,0 @@ -# GitHub Pages Prompting Guide - -*Last updated: 2025-08-14* - -## Tips & Techniques - -- Use GitHub Pages to host JSON data for transparency reports - Push a JSON file commit to a GitHub repository, enable GitHub Pages, and serve the JSON file as a static asset. - -## Sources - -- Reddit community discussions -- User-reported experiences diff --git a/models/image/alpaca/cost_optimization.md b/models/image/alpaca/cost_optimization.md new file mode 100644 index 0000000..2692c7c --- /dev/null +++ b/models/image/alpaca/cost_optimization.md @@ -0,0 +1,14 @@ +# Alpaca - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- 0 transaction cost (PFOF) +- $1000 per month platform fee + +## Money-Saving Tips + +- Alpaca is commission free. +- Speed. Can avoid rate-limit issues and its faster than an API request. 
Bypassing this bottle neck enables faster processing + diff --git a/models/image/alpaca/metadata.json b/models/image/alpaca/metadata.json new file mode 100644 index 0000000..ab66e47 --- /dev/null +++ b/models/image/alpaca/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Alpaca", + "category": "image", + "last_updated": "2025-08-16T17:49:37.392759", + "extraction_timestamp": null, + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 137, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/alpaca/parameters.json b/models/image/alpaca/parameters.json new file mode 100644 index 0000000..135492c --- /dev/null +++ b/models/image/alpaca/parameters.json @@ -0,0 +1,13 @@ +{ + "service": "Alpaca", + "last_updated": "2025-08-16T17:49:37.323180", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "0 transaction cost (PFOF)", + "pricing": "$1000 per month platform fee" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/alpaca/pitfalls.md b/models/image/alpaca/pitfalls.md new file mode 100644 index 0000000..27d09f6 --- /dev/null +++ b/models/image/alpaca/pitfalls.md @@ -0,0 +1,10 @@ +# Alpaca - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ My account was blocked from trading as im scalping stocks on Alpaca with 1 min charts. This error was returned. {"code":40310100,"message":"trade denied due to pattern day trading protection"} + +### ⚠️ ERROR: Error executing trade for MCD: {"code":40310000,"message":"account not eligible to trade uncovered option contracts"} despite having Level 3 options approval. 
+ diff --git a/models/image/alpaca/prompting.md b/models/image/alpaca/prompting.md new file mode 100644 index 0000000..9f4ec34 --- /dev/null +++ b/models/image/alpaca/prompting.md @@ -0,0 +1,15 @@ +# Alpaca Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- why go through the hassle of setting up your own database and API when you can access someone else’s, like Alpaca’s? +- Alpaca is commission free. +- Speed. Can avoid rate-limit issues and its faster than an API request. Bypassing this bottle neck enables faster processing +- WestLake-7B-v2 (this time with the preferred Alpaca prompt format) + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/astria/cost_optimization.md b/models/image/astria/cost_optimization.md new file mode 100644 index 0000000..98a59b8 --- /dev/null +++ b/models/image/astria/cost_optimization.md @@ -0,0 +1,8 @@ +# Astria - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- $100 for a configuration that's better than mine. 
+ diff --git a/models/image/astria/metadata.json b/models/image/astria/metadata.json new file mode 100644 index 0000000..d2af04f --- /dev/null +++ b/models/image/astria/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Astria", + "category": "image", + "last_updated": "2025-08-16T17:49:38.107465", + "extraction_timestamp": "2025-08-16T17:31:15.393602", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 163, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/astria/parameters.json b/models/image/astria/parameters.json new file mode 100644 index 0000000..74a8d2c --- /dev/null +++ b/models/image/astria/parameters.json @@ -0,0 +1,37 @@ +{ + "service": "Astria", + "last_updated": "2025-08-16T17:49:38.038921", + "recommended_settings": { + "setting_0": { + "description": "SDXL Steps=30" + }, + "setting_1": { + "description": "Size=768x1024" + }, + "setting_2": { + "description": "dpm++2m_karras=true" + }, + "setting_3": { + "description": "Film grain=true" + }, + "setting_4": { + "description": "Super-Resolution=true" + }, + "setting_5": { + "description": "Face Correct=true" + }, + "setting_6": { + "description": "Face swap=true" + }, + "setting_7": { + "description": "Inpaint Faces=true" + } + }, + "cost_optimization": { + "pricing": "$100 for a configuration that's better than mine." + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/astria/pitfalls.md b/models/image/astria/pitfalls.md new file mode 100644 index 0000000..d10e916 --- /dev/null +++ b/models/image/astria/pitfalls.md @@ -0,0 +1,10 @@ +# Astria - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ todays patch bugged out all equipment in the game. the one i had on me or in inventory and the new one i get from chests. 
if i go into item menu i see the stats (like +120 str, +21 mag) but when equipped no stats are added to the character. so basically u play without armour, weapon, helmet or shield. + +### ⚠️ So I'm getting a TypeError when I select the Astria ckpt file for text2img in my Stable Diffusion setup... I don't see any guides on using these model files, particularly for the Astria trained files. Am I missing something obvious here? + diff --git a/models/image/astria/prompting.md b/models/image/astria/prompting.md new file mode 100644 index 0000000..a4a7afe --- /dev/null +++ b/models/image/astria/prompting.md @@ -0,0 +1,23 @@ +# Astria Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Use the following Astria config: SDXL Steps: 30Size: 768x1024dpm++2m_karrasFilm grainSuper-ResolutionFace CorrectFace swapInpaint Faces + +## Recommended Settings + +- SDXL Steps=30 +- Size=768x1024 +- dpm++2m_karras=true +- Film grain=true +- Super-Resolution=true +- Face Correct=true +- Face swap=true +- Inpaint Faces=true + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/civitai/metadata.json b/models/image/civitai/metadata.json new file mode 100644 index 0000000..00931b7 --- /dev/null +++ b/models/image/civitai/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "CivitAI", + "category": "image", + "last_updated": "2025-08-16T17:49:38.252539", + "extraction_timestamp": "2025-08-16T17:40:18.097614", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 130, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/civitai/parameters.json b/models/image/civitai/parameters.json new file mode 100644 index 0000000..309abca --- /dev/null +++ b/models/image/civitai/parameters.json @@ -0,0 +1,29 @@ +{ + "service": "CivitAI", + "last_updated": "2025-08-16T17:49:38.182838", + "recommended_settings": { + "setting_0": { + "description": 
"remote_api_tokens.url_regex=civitai.com" + }, + "setting_1": { + "description": "remote_api_tokens.token=11111111111111111111111111111111111" + }, + "setting_2": { + "description": "engine=kohya" + }, + "setting_3": { + "description": "unetLR=0.0001" + }, + "setting_4": { + "description": "clipSkip=1" + }, + "setting_5": { + "description": "loraType=lora" + } + }, + "cost_optimization": {}, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/civitai/pitfalls.md b/models/image/civitai/pitfalls.md new file mode 100644 index 0000000..824d7a3 --- /dev/null +++ b/models/image/civitai/pitfalls.md @@ -0,0 +1,14 @@ +# CivitAI - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ Whenever I have a CivitAI tab open in Chrome, even on a page with relatively few images, the CPU and memory usage goes through the roof. The website consumes more memory than Stable Diffusion itself does when generating. If the CivitAI tab is left open too long, after a while the PC will completely blue screen.. This happened more and more often until the PC crashed entirely. + +### ⚠️ Using civitai lora URLs with Replicate Flux Dev LoRA returns error: "Prediction failed. Command '['pget', 'https://civitai.com/api/download/models/947302?type=Model&format=SafeTensor&token=XXXXX']'" + +### ⚠️ Prediction failed. Command '['pget', 'https://civitai.com/api/download/models/947302?type=Model&format=SafeTensor&token=XXXXX'" when using civitai lora URLs with Replicate. + +### ⚠️ I've tried testing some CivitAI models, but when I try to generate images, the PC freezes and crashes. These models are around 20GB or more. My conclusion was that those models weren't made to run on my GPU, so I tried other model sizes around 11GB. They didn't work either, they give errors, but at least they don't freeze my PC. 
So far, only the 'flux1-dev-bnb-nf4-v2' mode + diff --git a/models/image/civitai/prompting.md b/models/image/civitai/prompting.md new file mode 100644 index 0000000..d744c73 --- /dev/null +++ b/models/image/civitai/prompting.md @@ -0,0 +1,24 @@ +# CivitAI Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Use civitdl v2.0.0 for batch downloading from CivitAI: pip install civitdl --upgrade. +- Finally, I realized that I was using the model page URL instead of the model ***download*** link 😅😝. +- Using wget or curl with the CivitAI API key to download models. +- CivitAI making style LoRAs with only 10 epochs and less than 1,000 steps + +## Recommended Settings + +- remote_api_tokens.url_regex=civitai.com +- remote_api_tokens.token=11111111111111111111111111111111111 +- engine=kohya +- unetLR=0.0001 +- clipSkip=1 +- loraType=lora + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/dalle-2/cost_optimization.md b/models/image/dalle-2/cost_optimization.md new file mode 100644 index 0000000..4c694d2 --- /dev/null +++ b/models/image/dalle-2/cost_optimization.md @@ -0,0 +1,11 @@ +# DALLE 2 - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- $6.52 for daily use limit +- Dalle-2 is > 1,000x as dollar efficient as hiring a human illustrator. 
+- $15 for 115 generation increments +- $6.52 for what has been up until now a daily use limit + diff --git a/models/image/dalle-2/metadata.json b/models/image/dalle-2/metadata.json new file mode 100644 index 0000000..ce471e6 --- /dev/null +++ b/models/image/dalle-2/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "DALLE 2", + "category": "image", + "last_updated": "2025-08-16T17:49:37.101723", + "extraction_timestamp": null, + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 113, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/dalle-2/parameters.json b/models/image/dalle-2/parameters.json new file mode 100644 index 0000000..5f836b9 --- /dev/null +++ b/models/image/dalle-2/parameters.json @@ -0,0 +1,29 @@ +{ + "service": "DALLE 2", + "last_updated": "2025-08-16T17:49:37.028423", + "recommended_settings": { + "setting_0": { + "description": "generation_increments=115" + }, + "setting_1": { + "description": "price_per_increments=$15" + }, + "setting_2": { + "description": "daily_use_limit_price=$6.52" + }, + "setting_3": { + "description": "image_output=4" + }, + "setting_4": { + "description": "image_output_v=3" + } + }, + "cost_optimization": { + "pricing": "$6.52 for what has been up until now a daily use limit", + "tip_1": "Dalle-2 is > 1,000x as dollar efficient as hiring a human illustrator." 
+ }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/dalle-2/pitfalls.md b/models/image/dalle-2/pitfalls.md new file mode 100644 index 0000000..dd0d8d9 --- /dev/null +++ b/models/image/dalle-2/pitfalls.md @@ -0,0 +1,10 @@ +# DALLE 2 - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Cost & Limits + +### 💰 $6.52 for daily use limit + +### 💰 $6.52 for what has been up until now a daily use limit + diff --git a/models/image/dalle-2/prompting.md b/models/image/dalle-2/prompting.md new file mode 100644 index 0000000..8661a72 --- /dev/null +++ b/models/image/dalle-2/prompting.md @@ -0,0 +1,24 @@ +# DALLE 2 Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- OpenAI owns DALL‑E 2‑generated images to the extent allowed by law. +- Generations can be used for any legal purpose, including for commercial use. +- DALLE 2 can generate images up to 1920x1080 resolution when using text prompts on Discord. +- You may sell your rights to the Generations you create. +- OpenAI offers an interface where you can generate, create variations, inpaint and outpaint. 
+ +## Recommended Settings + +- generation_increments=115 +- price_per_increments=$15 +- daily_use_limit_price=$6.52 +- image_output=4 +- image_output_v=3 + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/dreamstudio/cost_optimization.md b/models/image/dreamstudio/cost_optimization.md new file mode 100644 index 0000000..9ef08bd --- /dev/null +++ b/models/image/dreamstudio/cost_optimization.md @@ -0,0 +1,8 @@ +# DreamStudio - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Dreamstudio credit pricing adjustment (cheaper, that is more options with credits) + diff --git a/models/image/dreamstudio/metadata.json b/models/image/dreamstudio/metadata.json new file mode 100644 index 0000000..04a44d4 --- /dev/null +++ b/models/image/dreamstudio/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "DreamStudio", + "category": "image", + "last_updated": "2025-08-16T17:49:37.250570", + "extraction_timestamp": "2025-08-16T17:07:10.940959", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 149, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/dreamstudio/parameters.json b/models/image/dreamstudio/parameters.json new file mode 100644 index 0000000..4f3f41b --- /dev/null +++ b/models/image/dreamstudio/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "DreamStudio", + "last_updated": "2025-08-16T17:49:37.180971", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Dreamstudio credit pricing adjustment (cheaper, that is more options with credits)" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/dreamstudio/pitfalls.md b/models/image/dreamstudio/pitfalls.md new file mode 100644 index 0000000..31eecae --- /dev/null +++ b/models/image/dreamstudio/pitfalls.md @@ -0,0 +1,6 @@ +# DreamStudio - Common Pitfalls & Issues + +*Last 
updated: 2025-08-16* + +*No major issues reported yet. This may indicate limited community data.* + diff --git a/models/image/dreamstudio/prompting.md b/models/image/dreamstudio/prompting.md new file mode 100644 index 0000000..d087e46 --- /dev/null +++ b/models/image/dreamstudio/prompting.md @@ -0,0 +1,19 @@ +# DreamStudio Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Use your DreamStudio API token in the .env file for the Discord bot. +- Use your DreamStudio API key in the Discord bot; the bot accepts the key from your DreamStudio account. +- This is for folks who have a paid Dreamstudio account. +- Dream Studio API key must be entered manually in the Photoshop plugin; the plugin does not allow pasting the key. +- Grab your API token from DreamStudio. +- inpainting model selection is at the bottom of the UI +- DreamStudio only has SDXL v1 and Stable Diffusion v1.6 models +- Use the basic steps configuration alongside a 4 image batch per gen option. Change your prompt if nothing satisfactory comes up or take the seed of one of the gens if it does and redo it with more steps for a higher quality image. + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/jasper/cost_optimization.md b/models/image/jasper/cost_optimization.md new file mode 100644 index 0000000..8e7b8d1 --- /dev/null +++ b/models/image/jasper/cost_optimization.md @@ -0,0 +1,13 @@ +# Jasper - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Free trial includes a bonus 10000 words. + +## Money-Saving Tips + +- install freestyle and aurora dashboard on Jasper model +- Use the official ProductDigi Jasper AI FREE Trial to get a bonus 10000 words. 
+ diff --git a/models/image/jasper/metadata.json b/models/image/jasper/metadata.json new file mode 100644 index 0000000..a9460a3 --- /dev/null +++ b/models/image/jasper/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Jasper", + "category": "image", + "last_updated": "2025-08-16T17:49:36.921999", + "extraction_timestamp": "2025-08-16T17:02:01.206160", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 373, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/jasper/parameters.json b/models/image/jasper/parameters.json new file mode 100644 index 0000000..fd7287f --- /dev/null +++ b/models/image/jasper/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "Jasper", + "last_updated": "2025-08-16T17:49:36.814650", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Free trial includes a bonus 10000 words." + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/jasper/pitfalls.md b/models/image/jasper/pitfalls.md new file mode 100644 index 0000000..e9ac62d --- /dev/null +++ b/models/image/jasper/pitfalls.md @@ -0,0 +1,8 @@ +# Jasper - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ original dashboard with error page when opening games on Jasper model + diff --git a/models/image/jasper/prompting.md b/models/image/jasper/prompting.md new file mode 100644 index 0000000..6323ebe --- /dev/null +++ b/models/image/jasper/prompting.md @@ -0,0 +1,13 @@ +# Jasper Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- install freestyle and aurora dashboard on Jasper model +- Use the official ProductDigi Jasper AI FREE Trial to get a bonus 10000 words. 
+ +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/leonardo-ai/cost_optimization.md b/models/image/leonardo-ai/cost_optimization.md new file mode 100644 index 0000000..5ccbf90 --- /dev/null +++ b/models/image/leonardo-ai/cost_optimization.md @@ -0,0 +1,13 @@ +# Leonardo AI - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Leonardo allows you to produce free AI images with daily renewal credits, which are sufficient to experiment with art styles. You can buy more because the AI tool is worthwhile to use. +- The "Video" tool costs 250 credits instead of 25. +- Pricing calculator link: https://docs.leonardo.ai/docs/plan-with-the-pricing-calculator +- Dear Leonardo AI Team, paying customers could use Flow State completely free of charge. +- According to the calculator, the cost per image with my configuration should be 16 API credits, but in reality, it costs 24. +- Video tool costs 250 credits instead of 25. 
+ diff --git a/models/image/leonardo-ai/metadata.json b/models/image/leonardo-ai/metadata.json new file mode 100644 index 0000000..33ea91e --- /dev/null +++ b/models/image/leonardo-ai/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Leonardo AI", + "category": "image", + "last_updated": "2025-08-16T18:35:13.933886", + "extraction_timestamp": null, + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 193, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/leonardo-ai/parameters.json b/models/image/leonardo-ai/parameters.json new file mode 100644 index 0000000..8a10df1 --- /dev/null +++ b/models/image/leonardo-ai/parameters.json @@ -0,0 +1,24 @@ +{ + "service": "Leonardo AI", + "last_updated": "2025-08-16T18:35:13.807189", + "recommended_settings": { + "setting_0": { + "description": "resolution=1280x768" + }, + "setting_1": { + "description": "resolution=1024x640" + } + }, + "cost_optimization": { + "tip_0": "Leonardo allows you to produce free AI images with daily renewal credits, which are sufficient to experiment with art styles. You can buy more because the AI tool is worthwhile to use.", + "tip_1": "The \"Video\" tool costs 250 credits instead of 25.", + "tip_2": "Pricing calculator link: https://docs.leonardo.ai/docs/plan-with-the-pricing-calculator", + "tip_3": "Dear Leonardo AI Team, paying customers could use Flow State completely free of charge.", + "tip_4": "According to the calculator, the cost per image with my configuration should be 16 API credits, but in reality, it costs 24.", + "tip_5": "Video tool costs 250 credits instead of 25." 
+ }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/leonardo-ai/pitfalls.md b/models/image/leonardo-ai/pitfalls.md new file mode 100644 index 0000000..d0f65ce --- /dev/null +++ b/models/image/leonardo-ai/pitfalls.md @@ -0,0 +1,20 @@ +# Leonardo AI - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ A little help? Been trying to upgrade my Leo sub from Apprentice to Artisan, but get this error message: "Error: updatePlanSubscription (quote request): update-plan-subscription stripe and db price id are not matched" Any ideas?? Thx. + +### ⚠️ Leonardo AI API with Griptape crashes Comfy, no error message, just goes to press any key to continue. + +### ⚠️ Hello. I'm a newbie with Leonardo AI. After inputting my prompt, Leonardo AI threw a "no mutations exist" error message and didn't generate anything. What does that mean and how to go around this issue, please. Thanks. + +### ⚠️ I keep getting this error message about a Boolean in Cavas Editor/Image Editor. It happens regardless of the image so I guess it has to do with the settings. I hit the “default reset” on the Settings and it didn’t fix itself. To be clear, I never mess with Settings other than switching to different Models. Anyway, yeah, how do I fix this? I use this service a lot and as recently as yesterday, and I’ve never had this error message before. Any ideas what this is about? + +## Cost & Limits + +### 💰 I've been using the Motion tool everyday for content creation and I'm on the paid plan and today all of a sudden it's been removed and replaced with a "Video" tool that costs 250 credits instead of 25 and that is not even able to do the style I ask it? Is Motion going to come back or? What's happening? + +### 💰 Leonardo AI removed the Motion tool, replaced with a Video tool that costs 250 credits instead of 25 and does not support the requested style. 
+ diff --git a/models/image/leonardo-ai/prompting.md b/models/image/leonardo-ai/prompting.md new file mode 100644 index 0000000..31bea8b --- /dev/null +++ b/models/image/leonardo-ai/prompting.md @@ -0,0 +1,20 @@ +# Leonardo AI Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- If you know how to use prompts appropriately, you will undoubtedly receive greater results. +- Use the character consistency feature in Leonardo AI to get consistent characters and settings. +- The first method works specifically for Leonardo Canvas mode +- Use the 'Concept art' or 'illustration' style when generating images to improve results. + +## Recommended Settings + +- resolution=1280x768 +- resolution=1024x640 + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/letsenhance/cost_optimization.md b/models/image/letsenhance/cost_optimization.md new file mode 100644 index 0000000..a2f3d86 --- /dev/null +++ b/models/image/letsenhance/cost_optimization.md @@ -0,0 +1,10 @@ +# LetsEnhance - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- reserve of 30 GPUs +- 5 images per month +- 3 USD per GPU per day + diff --git a/models/image/letsenhance/metadata.json b/models/image/letsenhance/metadata.json new file mode 100644 index 0000000..3b3bc30 --- /dev/null +++ b/models/image/letsenhance/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "LetsEnhance", + "category": "image", + "last_updated": "2025-08-16T17:49:38.673979", + "extraction_timestamp": "2025-08-16T17:46:04.167146", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 44, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/letsenhance/parameters.json b/models/image/letsenhance/parameters.json new file mode 100644 index 0000000..b9c7dad --- /dev/null +++ b/models/image/letsenhance/parameters.json @@ -0,0 +1,14 @@ +{ + "service": 
"LetsEnhance", + "last_updated": "2025-08-16T17:49:38.604022", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "reserve of 30 GPUs", + "tip_1": "5 images per month", + "tip_2": "3 USD per GPU per day" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/letsenhance/pitfalls.md b/models/image/letsenhance/pitfalls.md new file mode 100644 index 0000000..ee282b2 --- /dev/null +++ b/models/image/letsenhance/pitfalls.md @@ -0,0 +1,6 @@ +# LetsEnhance - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +*No major issues reported yet. This may indicate limited community data.* + diff --git a/models/image/letsenhance/prompting.md b/models/image/letsenhance/prompting.md new file mode 100644 index 0000000..3744a92 --- /dev/null +++ b/models/image/letsenhance/prompting.md @@ -0,0 +1,13 @@ +# LetsEnhance Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- Use LetsEnhance.io for high-res output +- JPEG compression can cause visual glitches like blockiness, color bleed, and ringing. Let's Enhance AI fixes these quickly: upload your image, choose Smart Enhance, hit 'start processing', and download your restored image. It's that simple. 
+ +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/modyfi/cost_optimization.md b/models/image/modyfi/cost_optimization.md new file mode 100644 index 0000000..e694a24 --- /dev/null +++ b/models/image/modyfi/cost_optimization.md @@ -0,0 +1,8 @@ +# modyfi - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Pricing=Free/USD + diff --git a/models/image/modyfi/metadata.json b/models/image/modyfi/metadata.json new file mode 100644 index 0000000..3e3d8b1 --- /dev/null +++ b/models/image/modyfi/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "modyfi", + "category": "image", + "last_updated": "2025-08-16T17:49:37.533393", + "extraction_timestamp": "2025-08-16T17:13:11.124434", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 42, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/modyfi/parameters.json b/models/image/modyfi/parameters.json new file mode 100644 index 0000000..da21f58 --- /dev/null +++ b/models/image/modyfi/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "modyfi", + "last_updated": "2025-08-16T17:49:37.464145", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Pricing=Free/USD" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/openart/cost_optimization.md b/models/image/openart/cost_optimization.md new file mode 100644 index 0000000..fa4acf0 --- /dev/null +++ b/models/image/openart/cost_optimization.md @@ -0,0 +1,9 @@ +# OpenArt - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- all of these have a free tier so you can test them out. 
+- Open Art 50% OFF + diff --git a/models/image/openart/metadata.json b/models/image/openart/metadata.json new file mode 100644 index 0000000..6f0d981 --- /dev/null +++ b/models/image/openart/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "OpenArt", + "category": "image", + "last_updated": "2025-08-16T17:49:37.678257", + "extraction_timestamp": "2025-08-16T17:16:13.255325", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 106, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/openart/parameters.json b/models/image/openart/parameters.json new file mode 100644 index 0000000..a3bc30a --- /dev/null +++ b/models/image/openart/parameters.json @@ -0,0 +1,13 @@ +{ + "service": "OpenArt", + "last_updated": "2025-08-16T17:49:37.607203", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "all of these have a free tier so you can test them out.", + "tip_1": "Open Art 50% OFF" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/openart/prompting.md b/models/image/openart/prompting.md new file mode 100644 index 0000000..887f732 --- /dev/null +++ b/models/image/openart/prompting.md @@ -0,0 +1,15 @@ +# OpenArt Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- OpenArt offers three templates: Character Vlog, Music Video, and Explainer. +- OpenArt aggregates 50+ AI models and keeps character looks consistent across shots. +- OpenArt's new "one-click story" feature allows users to generate one-minute videos from a single sentence, script, or song. It offers three templates: Character Vlog, Music Video, and Explainer. It maintains +- OpenArt One-Click Story feature allows you to generate a one-minute video by typing a line, pasting a script, or uploading a song. 
+ +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/image/phygital/cost_optimization.md b/models/image/phygital/cost_optimization.md new file mode 100644 index 0000000..f3c2c7f --- /dev/null +++ b/models/image/phygital/cost_optimization.md @@ -0,0 +1,8 @@ +# Phygital - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Pricing=Free/USD + diff --git a/models/image/phygital/metadata.json b/models/image/phygital/metadata.json new file mode 100644 index 0000000..4d0f19e --- /dev/null +++ b/models/image/phygital/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Phygital", + "category": "image", + "last_updated": "2025-08-16T17:49:37.821434", + "extraction_timestamp": "2025-08-16T17:21:27.937519", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 107, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/phygital/parameters.json b/models/image/phygital/parameters.json new file mode 100644 index 0000000..05fe87c --- /dev/null +++ b/models/image/phygital/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "Phygital", + "last_updated": "2025-08-16T17:49:37.751377", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Pricing=Free/USD" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/playform/metadata.json b/models/image/playform/metadata.json new file mode 100644 index 0000000..0f5f9df --- /dev/null +++ b/models/image/playform/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Playform", + "category": "image", + "last_updated": "2025-08-16T17:49:38.393934", + "extraction_timestamp": "2025-08-16T17:43:08.370701", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 70, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/playform/pitfalls.md 
b/models/image/playform/pitfalls.md new file mode 100644 index 0000000..2bb2f54 --- /dev/null +++ b/models/image/playform/pitfalls.md @@ -0,0 +1,8 @@ +# Playform - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ I was just able to play half an hour ago but it lagged out and crashed. Now when I boot up the game it kicks me out for a different reason. Connection timeout, profile signed out, etc. Internet is completely fine otherwise, although sometimes I lose connection for a few moments when trying boot but not always. It’s always stuck on the "Signing into online playform" message before it forces me out. + diff --git a/models/image/remini/cost_optimization.md b/models/image/remini/cost_optimization.md new file mode 100644 index 0000000..3a2d49e --- /dev/null +++ b/models/image/remini/cost_optimization.md @@ -0,0 +1,9 @@ +# Remini - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- $4.99/week +- the fact that it's free (with a limited number of photos per day) seems suspicious to me. 
+ diff --git a/models/image/remini/metadata.json b/models/image/remini/metadata.json new file mode 100644 index 0000000..1f7b36f --- /dev/null +++ b/models/image/remini/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Remini", + "category": "image", + "last_updated": "2025-08-16T17:49:38.532390", + "extraction_timestamp": "2025-08-16T17:44:33.087901", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 128, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/image/remini/parameters.json b/models/image/remini/parameters.json new file mode 100644 index 0000000..d1d787e --- /dev/null +++ b/models/image/remini/parameters.json @@ -0,0 +1,13 @@ +{ + "service": "Remini", + "last_updated": "2025-08-16T17:49:38.464585", + "recommended_settings": {}, + "cost_optimization": { + "pricing": "$4.99/week", + "tip_1": "the fact that it's free (with a limited number of photos per day) seems suspicious to me." + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/image/remini/pitfalls.md b/models/image/remini/pitfalls.md new file mode 100644 index 0000000..07c8ee7 --- /dev/null +++ b/models/image/remini/pitfalls.md @@ -0,0 +1,15 @@ +# Remini - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ Anyone receive this message before - Enhancement failed. An unknown error occurred. No further information is available. + + +This is on Remini web not the smart phone app. Was working perfectly before, but now comes up with that message every time I try to upload a pic. Video upload works fine and also can upload pics when not logged into my account so it's as if it's an account issue? + +## Cost & Limits + +### 💰 the fact that it's free (with a limited number of photos per day) seems suspicious to me. 
+ diff --git a/models/image/remini/prompting.md b/models/image/remini/prompting.md new file mode 100644 index 0000000..ecb7e63 --- /dev/null +++ b/models/image/remini/prompting.md @@ -0,0 +1,12 @@ +# Remini Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- I uploaded 8 profile pictures of Jessica Lily and then selected a model. + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/text/ai/metadata.json b/models/text/ai/metadata.json deleted file mode 100644 index 6adf27e..0000000 --- a/models/text/ai/metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "service": "AI", - "category": "text", - "last_updated": "2025-08-14T14:56:50.716671", - "extraction_timestamp": "2025-08-14T14:56:50.716671", - "data_sources": [ - "Reddit API", - "Community discussions" - ], - "posts_analyzed": 0, - "confidence": "medium", - "version": "1.0.0" -} \ No newline at end of file diff --git a/models/text/ai/prompting.md b/models/text/ai/prompting.md deleted file mode 100644 index 0eb76c4..0000000 --- a/models/text/ai/prompting.md +++ /dev/null @@ -1,14 +0,0 @@ -# AI Prompting Guide - -*Last updated: 2025-08-14* - -## Tips & Techniques - -- Data collection is the first step in AI. - Collecting data (text, image, sound, dataset) and storing it in a dictionary (memory of words). -- Pattern-recognizing algorithms are used for learning. - Using algorithms like neo-learning, deep-learning, or reinforcement-learning to identify patterns in data. -- Decision-making logic is used for reasoning. - Using decision-making logic like heuristic learning, probabilistic learning or bias learning to pick the best answer. 
- -## Sources - -- Reddit community discussions -- User-reported experiences diff --git a/models/text/characterai/cost_optimization.md b/models/text/characterai/cost_optimization.md new file mode 100644 index 0000000..ef817a2 --- /dev/null +++ b/models/text/characterai/cost_optimization.md @@ -0,0 +1,14 @@ +# Character.AI - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Free unlimited messaging + +## Money-Saving Tips + +- Use the Pinned Memories feature to save and pin key messages in each chat to help your Character remember important information. +- Janitor AI is a free NSFW version of Character AI +- Character.AI offers free and unlimited messaging for all users. + diff --git a/models/text/characterai/metadata.json b/models/text/characterai/metadata.json index 7d18625..81ab1c6 100644 --- a/models/text/characterai/metadata.json +++ b/models/text/characterai/metadata.json @@ -1,13 +1,13 @@ { "service": "Character.AI", "category": "text", - "last_updated": "2025-08-14T18:49:04.749081", - "extraction_timestamp": "2025-08-14T18:48:51.676995", + "last_updated": "2025-08-16T18:55:31.861763", + "extraction_timestamp": "2025-08-16T18:37:41.343566", "data_sources": [ "Reddit API", "Community discussions" ], - "posts_analyzed": 100, + "posts_analyzed": 355, "confidence": "medium", "version": "1.0.0" } \ No newline at end of file diff --git a/models/text/characterai/parameters.json b/models/text/characterai/parameters.json index 53924aa..6b06014 100644 --- a/models/text/characterai/parameters.json +++ b/models/text/characterai/parameters.json @@ -1,12 +1,23 @@ { "service": "Character.AI", - "last_updated": "2025-08-12T20:04:44.782084", + "last_updated": "2025-08-16T18:55:31.740490", "recommended_settings": { "setting_0": { - "description": "Enable 'Goro' in the experimental settings under the Style tab" + "description": "character_definition_limit=32000" + }, + "setting_1": { + "description": 
"reported_character_definition_limit=3200" + }, + "setting_2": { + "description": "memory_pins=5" + }, + "setting_3": { + "description": "style=Goro" } }, - "cost_optimization": {}, + "cost_optimization": { + "unlimited_option": "Free unlimited messaging" + }, "sources": [ "Reddit community", "User reports" diff --git a/models/text/characterai/pitfalls.md b/models/text/characterai/pitfalls.md index e0b44c1..78a8add 100644 --- a/models/text/characterai/pitfalls.md +++ b/models/text/characterai/pitfalls.md @@ -1,18 +1,18 @@ # Character.AI - Common Pitfalls & Issues -*Last updated: 2025-08-12* +*Last updated: 2025-08-16* ## Technical Issues -### ⚠️ image bug where character images do not load +### ⚠️ Bug that stops Character.AI from replying to your messages -### ⚠️ keyboard bug where the on-screen keyboard closes unexpectedly +### ⚠️ Major keyboard bug: keyboard closing bug on mobile -### ⚠️ bug that stops replying to your messages +## Cost & Limits -### ⚠️ disappearing characters bug where characters are unavailable for some users but available for others +### 💰 the 32000 character limit for the character definition (yet reported only 3200 characters is considered) -## Cost & Limits +### 💰 Character.AI has a 32,000 character limit for creating an AI. -### 💰 Character creation character limit of 32,000 characters per AI +### 💰 Free unlimited messaging diff --git a/models/text/characterai/prompting.md b/models/text/characterai/prompting.md index fe33dc4..f7ffbcc 100644 --- a/models/text/characterai/prompting.md +++ b/models/text/characterai/prompting.md @@ -1,10 +1,25 @@ # Character.AI Prompting Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Tips & Techniques +- Use the Pinned Memories feature to save and pin key messages in each chat to help your Character remember important information. 
- Compared to other sites where the X button is hard to find or is easily mashed in with stuff that will take you to the advertisers site, character ai's ads have clear X buttons and they are placed in a way that fat fingering them or missing by a few inches wont send you to google play store or some random site. +- Janitor AI is a free NSFW version of Character AI +- Bots do not learn from your chats, so you cannot train them by chatting. +- You also use prompts to jailbreak the Character AI and access the NSFW version +- Character.AI offers free and unlimited messaging for all users. +- Model selection is only available to character.ai+ members; if you lose your membership you cannot select models. +- Use a Bluetooth keyboard on your phone to avoid the major keyboard closing bug on mobile. +- Go to your "Style" tab on a character and scroll down to experimental. Hit "Goro", and your chats should be fixed. + +## Recommended Settings + +- character_definition_limit=32000 +- reported_character_definition_limit=3200 +- memory_pins=5 +- style=Goro ## Sources diff --git a/models/text/fliki/cost_optimization.md b/models/text/fliki/cost_optimization.md index 0594357..45f1887 100644 --- a/models/text/fliki/cost_optimization.md +++ b/models/text/fliki/cost_optimization.md @@ -1,8 +1,19 @@ # Fliki - Cost Optimization Guide -*Last updated: 2025-08-12* +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Promo code FLIKI25ANNUAL (supposed to give 25% off yearly plans) didn’t work at checkout. +- Fliki doesn’t offer a free trial of the Premium plan. +- Premium features include 1000+ ultra‑realistic voices, longer exports, no watermark, and advanced video tools. +- Promo code FLIKISUMMER50 didn’t work. +- 25% off discount on Fliki premium plans for a whole year. +- Promo code FLIKIBLACKFRIDAY50 didn’t work. 
## Money-Saving Tips -- 25% off premium plans for a whole year +- Premium plan provides 1000+ ultra‑realistic voices, longer exports, no watermark, and advanced video tools. +- If you need these features, you must subscribe to the Premium plan; there is no free trial available. +- To claim a 25% off discount on Fliki premium plans, go to the Fliki discount page (coupon included), join with a new email, pick a plan and period, then proceed with your subscription. diff --git a/models/text/fliki/metadata.json b/models/text/fliki/metadata.json index 3313b01..8d44595 100644 --- a/models/text/fliki/metadata.json +++ b/models/text/fliki/metadata.json @@ -1,13 +1,13 @@ { "service": "Fliki", "category": "text", - "last_updated": "2025-08-12T20:07:02.281408", - "extraction_timestamp": null, + "last_updated": "2025-08-16T18:55:32.621056", + "extraction_timestamp": "2025-08-16T18:49:06.894427", "data_sources": [ "Reddit API", "Community discussions" ], - "posts_analyzed": 23, + "posts_analyzed": 75, "confidence": "medium", "version": "1.0.0" } \ No newline at end of file diff --git a/models/text/fliki/parameters.json b/models/text/fliki/parameters.json index 55d84e4..1d73e8e 100644 --- a/models/text/fliki/parameters.json +++ b/models/text/fliki/parameters.json @@ -1,9 +1,14 @@ { "service": "Fliki", - "last_updated": "2025-08-12T20:07:02.186904", + "last_updated": "2025-08-16T18:55:32.499344", "recommended_settings": {}, "cost_optimization": { - "tip_0": "25% off premium plans for a whole year" + "tip_0": "Promo code FLIKI25ANNUAL (supposed to give 25% off yearly plans) didn\u2019t work at checkout.", + "tip_1": "Fliki doesn\u2019t offer a free trial of the Premium plan.", + "tip_2": "Premium features include 1000+ ultra\u2011realistic voices, longer exports, no watermark, and advanced video tools.", + "tip_3": "Promo code FLIKISUMMER50 didn\u2019t work.", + "tip_4": "25% off discount on Fliki premium plans for a whole year.", + "tip_5": "Promo code FLIKIBLACKFRIDAY50 
didn\u2019t work." }, "sources": [ "Reddit community", diff --git a/models/text/fliki/pitfalls.md b/models/text/fliki/pitfalls.md new file mode 100644 index 0000000..40c81dc --- /dev/null +++ b/models/text/fliki/pitfalls.md @@ -0,0 +1,17 @@ +# Fliki - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Policy & Account Issues + +### ⚠️ Fliki does not offer a free trial of the Premium plan. +**Note**: Be aware of terms of service regarding account creation. + +## Cost & Limits + +### 💰 Free plan has watermark, low‑res exports, and barely any credits. + +### 💰 Free plan: AI voices, image assets, simple video generation, but after a few projects hit limitations — watermark, low‑res exports, barely any credits. + +### 💰 Free plan has watermark, low-res exports, barely any credits. + diff --git a/models/text/fliki/prompting.md b/models/text/fliki/prompting.md index 36aab56..4ce0477 100644 --- a/models/text/fliki/prompting.md +++ b/models/text/fliki/prompting.md @@ -1,10 +1,13 @@ # Fliki Prompting Guide -*Last updated: 2025-08-12* +*Last updated: 2025-08-16* -## Usage Tips +## Tips & Techniques -- Use the Fliki discount page (https://fliki.ai/pricing?via=subreddits) which includes the coupon, sign up with a new email, choose a plan and period, then proceed with subscription to activate a whole year at 25% discount. +- Premium plan provides 1000+ ultra‑realistic voices, longer exports, no watermark, and advanced video tools. +- If you need these features, you must subscribe to the Premium plan; there is no free trial available. +- To claim a 25% off discount on Fliki premium plans, go to the Fliki discount page (coupon included), join with a new email, pick a plan and period, then proceed with your subscription. +- Fliki AI transforms text into engaging videos and voiceovers within minutes. 
## Sources diff --git a/models/text/ideogram/cost_optimization.md b/models/text/ideogram/cost_optimization.md index 43ecf92..ac7e5a4 100644 --- a/models/text/ideogram/cost_optimization.md +++ b/models/text/ideogram/cost_optimization.md @@ -1,13 +1,17 @@ # Ideogram - Cost Optimization Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Cost & Pricing Information -- Which, if 3-credit generations is used every time, translates to 216 credits per day. +- Generating 4 images therefore costs $0.04. +- Per API, V2 costs $0.08 per image. +- Only 5 AI credits per use +- So one credit costs $0.02. +- If you book the smallest monthly package on the website, 400 credits cost $8. +- This basically means that once you've run out of priority credits, you can generate 72 batches of images per day....theoretically. But only if you stay up all day and night. Which, if 3-credit generations is used every time, translates to 216 credits per day. ## Money-Saving Tips - No more costly LoRA training is needed. -- This basically means that once you've run out of priority credits, you can generate 72 batches of images per day....theoretically. 
diff --git a/models/text/ideogram/metadata.json b/models/text/ideogram/metadata.json index 14a6d1f..48a6af7 100644 --- a/models/text/ideogram/metadata.json +++ b/models/text/ideogram/metadata.json @@ -1,13 +1,13 @@ { "service": "Ideogram", "category": "text", - "last_updated": "2025-08-14T18:50:04.941370", - "extraction_timestamp": "2025-08-14T18:49:56.394586", + "last_updated": "2025-08-16T18:55:32.107684", + "extraction_timestamp": "2025-08-16T18:41:34.582364", "data_sources": [ "Reddit API", "Community discussions" ], - "posts_analyzed": 50, + "posts_analyzed": 240, "confidence": "medium", "version": "1.0.0" } \ No newline at end of file diff --git a/models/text/ideogram/parameters.json b/models/text/ideogram/parameters.json index a4c7c7a..bbb8876 100644 --- a/models/text/ideogram/parameters.json +++ b/models/text/ideogram/parameters.json @@ -1,9 +1,11 @@ { "service": "Ideogram", - "last_updated": "2025-08-14T18:50:04.846403", + "last_updated": "2025-08-16T18:55:31.985982", "recommended_settings": {}, "cost_optimization": { - "tip_0": "Which, if 3-credit generations is used every time, translates to 216 credits per day." + "pricing": "If you book the smallest monthly package on the website, 400 credits cost $8.", + "tip_1": "Only 5 AI credits per use", + "tip_2": "This basically means that once you've run out of priority credits, you can generate 72 batches of images per day....theoretically. But only if you stay up all day and night. Which, if 3-credit generations is used every time, translates to 216 credits per day." }, "sources": [ "Reddit community", diff --git a/models/text/ideogram/pitfalls.md b/models/text/ideogram/pitfalls.md index 092a40d..705d060 100644 --- a/models/text/ideogram/pitfalls.md +++ b/models/text/ideogram/pitfalls.md @@ -1,6 +1,10 @@ # Ideogram - Common Pitfalls & Issues -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* -*No major issues reported yet. 
This may indicate limited community data.* +## Technical Issues + +### ⚠️ Ideogram doesn't have API + +### ⚠️ Ideogram has stepped over the line, increasing wait times between generations to 20 minutes for everyone in slow queue. diff --git a/models/text/ideogram/prompting.md b/models/text/ideogram/prompting.md index 366e81a..fc9115d 100644 --- a/models/text/ideogram/prompting.md +++ b/models/text/ideogram/prompting.md @@ -1,16 +1,21 @@ # Ideogram Prompting Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Tips & Techniques +- Just add `<>` at the end of your prompt, and let the magic happen! +- The composition should be minimal, but unique and striking. +- Ideogram is best for creating logos, icons, and flyers +- Given an input prompt to a text-to-image model, rewrite the prompt into a description of a unique, stunning, captivating and creative image. Before creating the output prompt, first consider the style, and composition before describing the elements that make up the extraordinary image. +- Eli5: Using ChatGPT as a Flux Prompt Enhancer (similar to Ideogram's 'Magic Prompt'). +- Ideogram’s new Character tool brings personality to AI images. You can create consistent visual traits for people or mascots across prompts, ideal for comics, branding, or storytelling. It’s a big step toward persistent identity in generative art. +- Mention all text to be generated explicitly and wrap in double quotes. Do not use double quotes for any other purpose. - Basically, go into dev mods, use the destroy tool to destroy any object directly related to your ideolegion (shrines, ideogram, etc...), randomize symbols, regenerate all buildings and everything should be good again. -- Prompt easily and keep the identity consistent. -- Developers worldwide can build on Ideogram’s powerful character consistency capabilities now. - Remix your style to place your character without masking. - Edit images to place your character in a specific region. 
+- Prompt easily and keep the identity consistent. - No more costly LoRA training is needed. -- This basically means that once you've run out of priority credits, you can generate 72 batches of images per day....theoretically. ## Sources diff --git a/models/text/resemble-ai/cost_optimization.md b/models/text/resemble-ai/cost_optimization.md index 4e242a7..6be25bc 100644 --- a/models/text/resemble-ai/cost_optimization.md +++ b/models/text/resemble-ai/cost_optimization.md @@ -1,8 +1,9 @@ # Resemble AI - Cost Optimization Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Cost & Pricing Information -- $0.006 per second +- Pricing: basic package starts at $0.006 per second (because that’s not confusing). +- basic package starts at $0.006 per second (because that’s not confusing). diff --git a/models/text/resemble-ai/metadata.json b/models/text/resemble-ai/metadata.json index 9a91563..bd141a1 100644 --- a/models/text/resemble-ai/metadata.json +++ b/models/text/resemble-ai/metadata.json @@ -1,13 +1,13 @@ { "service": "Resemble AI", "category": "text", - "last_updated": "2025-08-14T18:54:04.618837", - "extraction_timestamp": "2025-08-14T18:54:04.363479", + "last_updated": "2025-08-16T18:55:33.152001", + "extraction_timestamp": "2025-08-16T18:54:13.363333", "data_sources": [ "Reddit API", "Community discussions" ], - "posts_analyzed": 15, + "posts_analyzed": 97, "confidence": "medium", "version": "1.0.0" } \ No newline at end of file diff --git a/models/text/resemble-ai/parameters.json b/models/text/resemble-ai/parameters.json index b665ab7..faf997b 100644 --- a/models/text/resemble-ai/parameters.json +++ b/models/text/resemble-ai/parameters.json @@ -1,9 +1,9 @@ { "service": "Resemble AI", - "last_updated": "2025-08-14T18:54:04.497022", + "last_updated": "2025-08-16T18:55:33.003913", "recommended_settings": {}, "cost_optimization": { - "pricing": "$0.006 per second" + "pricing": "basic package starts at $0.006 per second (because that\u2019s not confusing)." 
}, "sources": [ "Reddit community", diff --git a/models/text/resemble-ai/prompting.md b/models/text/resemble-ai/prompting.md index 430b6e3..01ea674 100644 --- a/models/text/resemble-ai/prompting.md +++ b/models/text/resemble-ai/prompting.md @@ -1,10 +1,11 @@ # Resemble AI Prompting Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Tips & Techniques -- Resemble.ai is an AI-powered text-to-voice and voice-to-voice generator that can be trained with your own voice, or offers a range of voices to choose from. +- Emotion Control: Fine-tune speech expressiveness with a single parameter. From deadpan to dramatic—works out of the box. +- Zero-Shot Voice Cloning: Clone any voice with just a few seconds of reference audio. No finetuning needed. ## Sources diff --git a/models/text/synthesia/metadata.json b/models/text/synthesia/metadata.json index 25bfe4e..f7ed590 100644 --- a/models/text/synthesia/metadata.json +++ b/models/text/synthesia/metadata.json @@ -1,13 +1,13 @@ { "service": "Synthesia", "category": "text", - "last_updated": "2025-08-14T18:51:19.927147", - "extraction_timestamp": "2025-08-14T18:51:11.702010", + "last_updated": "2025-08-16T18:55:32.373561", + "extraction_timestamp": "2025-08-16T18:44:25.745974", "data_sources": [ "Reddit API", "Community discussions" ], - "posts_analyzed": 47, + "posts_analyzed": 217, "confidence": "medium", "version": "1.0.0" } \ No newline at end of file diff --git a/models/text/synthesia/pitfalls.md b/models/text/synthesia/pitfalls.md index f06cb1a..54a046b 100644 --- a/models/text/synthesia/pitfalls.md +++ b/models/text/synthesia/pitfalls.md @@ -1,6 +1,18 @@ # Synthesia - Common Pitfalls & Issues -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* -*No major issues reported yet. This may indicate limited community data.* +## Technical Issues + +### ⚠️ Synthesia also recognizes it just fine, but, once in a while, it stops receiving any input whatsoever from the keyboard. It doesn't display any error message. 
The USB + +### ⚠️ Synthesia detected a problem and must close:\n\nThe 'tracks' data file couldn't load: Error parsing element attribute\n\nIf you don't think this should have happened, please\ncontact us at support@synthesiagame.com and\ndescribe what you were doing when the problem\noccurred. Thanks for your help! + +### ⚠️ Apps such as Utorrent, synthesia, Mixcraft etc take forever to open and even crash sometimes. + +### ⚠️ synthesia detected a problem and must close. The score data files could not be loaded. Error parsing element attitude. + +## Policy & Account Issues + +### ⚠️ Support has not been helpful… they told me Synthesia and GarageBand don’t output “real MIDI.” I have a brand new FP-30X. I have been trying to use the MIDI interface. To diagnose the problem, I've used two different devices (Macbook Pro, OS 13.3.1, USB connection and iPad Air, USB connection and Bluetooth connection) along with two different pieces of software (GarageBand and Synthesia). The symptoms are identical across all of these devices, diff --git a/models/text/synthesia/prompting.md b/models/text/synthesia/prompting.md index f5c2345..1260af1 100644 --- a/models/text/synthesia/prompting.md +++ b/models/text/synthesia/prompting.md @@ -1,11 +1,20 @@ # Synthesia Prompting Guide -*Last updated: 2025-08-14* +*Last updated: 2025-08-16* ## Tips & Techniques -- Try using a mobile phone instead of a tablet (Android too) and rebooting to solve output lag. -- Check MIDI keyboard compatibility; some keyboards may not respond in Synthesia. +- Steeper pricing, learning curve +- High-quality AI avatars with realistic expressions and lip-syncing +- Best for: Large-scale corporate use +- Using Synthesia can lead to $10K savings and 76% fewer support calls. +- Use a DAW and an interface that allows you to play without latency. 
+- API & team collaboration tools +- Intuitive drag-and-drop interface for easy video creation +- Great for training, marketing, and internal comms +- Replace "Reaper" with any DAW that you happen to be +- Supports over 120 languages and accents +- 120+ languages, realistic avatars, custom branding ## Sources diff --git a/models/video/klingai/cost_optimization.md b/models/video/klingai/cost_optimization.md new file mode 100644 index 0000000..4853c7b --- /dev/null +++ b/models/video/klingai/cost_optimization.md @@ -0,0 +1,15 @@ +# klingai - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- $6 for each 8 second video +- 20 credits per 5 seconds for video generation + +## Money-Saving Tips + +- During a special sale, image creations on KlingAI did not cost any credits +- The API supports both free and paid Kling accounts. +- The gift card option for KlingAI has the same cost/credit combos as regular subscription plans and is a one-time purchase with no auto-renew or need to cancel later + diff --git a/models/video/klingai/metadata.json b/models/video/klingai/metadata.json new file mode 100644 index 0000000..aaebe88 --- /dev/null +++ b/models/video/klingai/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "klingai", + "category": "image", + "last_updated": "2025-08-16T17:49:37.966179", + "extraction_timestamp": null, + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 88, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/video/klingai/parameters.json b/models/video/klingai/parameters.json new file mode 100644 index 0000000..64d4da1 --- /dev/null +++ b/models/video/klingai/parameters.json @@ -0,0 +1,35 @@ +{ + "service": "klingai", + "last_updated": "2025-08-16T17:49:37.894942", + "recommended_settings": { + "setting_0": { + "description": "kling_access_key=config.kling_access_key" + }, + "setting_1": { + "description": 
"kling_secret_key=config.kling_secret_key" + }, + "setting_2": { + "description": "alg=HS256" + }, + "setting_3": { + "description": "typ=JWT" + }, + "setting_4": { + "description": "iss=self.ak" + }, + "setting_5": { + "description": "exp=int(time.time())+1800" + }, + "setting_6": { + "description": "nbf=int(time.time())-5" + } + }, + "cost_optimization": { + "pricing": "$6 for each 8 second video", + "tip_1": "20 credits per 5 seconds for video generation" + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/video/klingai/pitfalls.md b/models/video/klingai/pitfalls.md new file mode 100644 index 0000000..3e0e3de --- /dev/null +++ b/models/video/klingai/pitfalls.md @@ -0,0 +1,10 @@ +# klingai - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Technical Issues + +### ⚠️ The error message displayed is 'Kling 1.5 is in high demand right now' + +### ⚠️ Kling 1.5 Standard crashes constantly when generating videos + diff --git a/models/video/klingai/prompting.md b/models/video/klingai/prompting.md new file mode 100644 index 0000000..2d816c9 --- /dev/null +++ b/models/video/klingai/prompting.md @@ -0,0 +1,29 @@ +# klingai Prompting Guide + +*Last updated: 2025-08-16* + +## Tips & Techniques + +- During a special sale, image creations on KlingAI did not cost any credits +- The API supports both free and paid Kling accounts. +- Key features of Kling API v1 include video generation from elements, special effects, virtual try-on, video extension, lip-syncing, and text-to-speech. +- Use KlingAI’s new 'Virtual Try-On' feature: first generate a virtual model (you can even use MidJourney for this), then pick basic tops and bottoms to showcase, upload everything to KlingAI and let the magic happen. 
+- The gift card option for KlingAI has the same cost/credit combos as regular subscription plans and is a one-time purchase with no auto-renew or need to cancel later +- Kling API v1 offers text-to-video, image-to-video, and image manipulation capabilities. +- Kling API v1 supports model versions 1.5, 1.6, and 2.0. +- You can select up to six elements within an image to define their motion trajectories (supported on 1.5 model). + +## Recommended Settings + +- kling_access_key=config.kling_access_key +- kling_secret_key=config.kling_secret_key +- alg=HS256 +- typ=JWT +- iss=self.ak +- exp=int(time.time())+1800 +- nbf=int(time.time())-5 + +## Sources + +- Reddit community discussions +- User-reported experiences diff --git a/models/video/luma-dream-machine/cost_optimization.md b/models/video/luma-dream-machine/cost_optimization.md new file mode 100644 index 0000000..a3583fa --- /dev/null +++ b/models/video/luma-dream-machine/cost_optimization.md @@ -0,0 +1,8 @@ +# Luma Dream Machine - Cost Optimization Guide + +*Last updated: 2025-08-16* + +## Cost & Pricing Information + +- Premium+ plan: 90 videos per month with Kling 2.1 1080p; each video costs 1,500 credits; monthly credit limit 45,000; actual videos per month = 45,000 / 1,500 = 30 videos. 
+ diff --git a/models/video/luma-dream-machine/metadata.json b/models/video/luma-dream-machine/metadata.json new file mode 100644 index 0000000..3c807ac --- /dev/null +++ b/models/video/luma-dream-machine/metadata.json @@ -0,0 +1,13 @@ +{ + "service": "Luma Dream Machine", + "category": "video", + "last_updated": "2025-08-16T18:55:32.876982", + "extraction_timestamp": "2025-08-16T18:53:04.830332", + "data_sources": [ + "Reddit API", + "Community discussions" + ], + "posts_analyzed": 35, + "confidence": "medium", + "version": "1.0.0" +} \ No newline at end of file diff --git a/models/video/luma-dream-machine/parameters.json b/models/video/luma-dream-machine/parameters.json new file mode 100644 index 0000000..c213be1 --- /dev/null +++ b/models/video/luma-dream-machine/parameters.json @@ -0,0 +1,12 @@ +{ + "service": "Luma Dream Machine", + "last_updated": "2025-08-16T18:55:32.748601", + "recommended_settings": {}, + "cost_optimization": { + "tip_0": "Premium+ plan: 90 videos per month with Kling 2.1 1080p; each video costs 1,500 credits; monthly credit limit 45,000; actual videos per month = 45,000 / 1,500 = 30 videos." + }, + "sources": [ + "Reddit community", + "User reports" + ] +} \ No newline at end of file diff --git a/models/video/luma-dream-machine/pitfalls.md b/models/video/luma-dream-machine/pitfalls.md new file mode 100644 index 0000000..ac86347 --- /dev/null +++ b/models/video/luma-dream-machine/pitfalls.md @@ -0,0 +1,10 @@ +# Luma Dream Machine - Common Pitfalls & Issues + +*Last updated: 2025-08-16* + +## Cost & Limits + +### 💰 Hi. The **Premium+** plan states that I can generate **90 videos** per month with '**Kling 2.1 1080p**', but I found out that each video costs **1,500 credits**. Since the monthly credit limit is **45,000**, that only allows for 45,000 / 1,500 = **30 videos** per month. 
So where are the **90 videos?** + +### 💰 Premium+ plan: 90 videos per month with Kling 2.1 1080p; each video costs 1,500 credits; monthly credit limit 45,000; actual videos per month = 45,000 / 1,500 = 30 videos. + diff --git a/src/cli.py b/src/cli.py index 7e93cf1..fef8fd6 100644 --- a/src/cli.py +++ b/src/cli.py @@ -430,7 +430,7 @@ async def _targeted(): # Generate queries based on category or all services use_all_patterns = query_limit >= 20 # Use all patterns if query_limit is 20 or more all_queries = generator.generate_queries( - max_queries=1000, # Get all services + max_queries=2000, # Get all services category_filter=category if category else None, use_all_patterns=use_all_patterns @@ -478,14 +478,17 @@ async def _targeted(): async def process_query(query, scraper, batch_processor, llm, semaphore): async with semaphore: try: + logger.info(f"Searching {query['service']}: {query['pattern_type']}") posts = await scraper.scrape(query['query_url'], max_posts=limit) if posts: + logger.info(f"Found {len(posts)} posts for {query['service']} ({query['pattern_type']})") # Batch process with LLM batches = batch_processor.batch_posts_by_tokens(posts, query['service']) results = [] - for batch in batches: + for batch_idx, batch in enumerate(batches, 1): + logger.info(f"Processing batch {batch_idx}/{len(batches)} for {query['service']} ({query['pattern_type']})") result = await batch_processor.process_batch(batch, query['service'], llm) results.append(result) @@ -495,6 +498,7 @@ async def process_query(query, scraper, batch_processor, llm, semaphore): 'results': results } else: + logger.info(f"No posts found for {query['service']} ({query['pattern_type']})") return { 'query': query, 'posts_found': 0, @@ -660,7 +664,7 @@ async def _batch(): # Generate queries for all services in category use_all_patterns = query_limit >= 20 # Use all patterns if query_limit is 20 or more all_queries = generator.generate_queries( - max_queries=1000, # High limit to get all services + 
max_queries=2000, # High limit to get all services category_filter=category, use_all_patterns=use_all_patterns ) @@ -726,17 +730,21 @@ async def _batch(): service_queries = queries_by_service[service] progress.update(task, description=f"Processing {service}...") - for query in service_queries: + for query_idx, query in enumerate(service_queries, 1): try: + logger.info(f"Query {query_idx}/{len(service_queries)} for {service}: {query['pattern_type']}") posts = await scraper.scrape(query['query_url'], max_posts=limit) if posts: + logger.info(f"Found {len(posts)} posts for {service} ({query['pattern_type']})") # Batch process with LLM batches = batch_processor.batch_posts_by_tokens(posts, service) for batch in batches: result = await batch_processor.process_batch(batch, service, llm) all_results.append(result) + else: + logger.info(f"No posts found for {service} ({query['pattern_type']})") progress.update(task, advance=1) await asyncio.sleep(1) # Rate limiting @@ -1192,6 +1200,81 @@ def search_models(query, limit): console.print(table) +@cli.command(name="update-context") +@click.option("--force", "-f", is_flag=True, help="Force update even if cache is recent") +def update_context(force): + """Update OpenRouter model context cache for accurate token limits.""" + show_banner() + + from src.services.openrouter_context import OpenRouterContextManager + from pathlib import Path + import json + from datetime import datetime, timedelta + + cache_file = Path("data/cache/openrouter_models.json") + + # Check if cache exists and is recent + if cache_file.exists() and not force: + try: + with open(cache_file, 'r') as f: + data = json.load(f) + fetched_at = datetime.fromisoformat(data.get("fetched_at", "")) + age = datetime.now() - fetched_at + + if age < timedelta(hours=24): + console.print(f"[yellow]Cache is recent ({age.total_seconds()/3600:.1f} hours old)[/yellow]") + console.print(f"[dim]Use --force to update anyway[/dim]") + + # Show current stats + models = 
data.get("models", {}) + console.print(f"\n[cyan]Current cache:[/cyan] {len(models)} models") + + # Show some high-context models + high_context = [(k, v) for k, v in models.items() if v.get("context_length", 0) > 100000] + if high_context: + console.print(f"[green]High-context models:[/green] {len(high_context)}") + for model_id, info in sorted(high_context[:5], key=lambda x: x[1]["context_length"], reverse=True): + console.print(f" • {model_id}: {info['context_length']:,} tokens") + return + except Exception: + pass # Cache is invalid, proceed with update + + # Update cache + console.print("[cyan]Fetching model context information from OpenRouter...[/cyan]") + + manager = OpenRouterContextManager() + models = manager.get_all_models() + + if not models: + console.print("[red]Failed to fetch models from OpenRouter[/red]") + console.print("[yellow]Make sure you have OPENROUTER_API_KEY set in .env[/yellow]") + return + + # Save cache + cache_file.parent.mkdir(parents=True, exist_ok=True) + manager.save_cache(str(cache_file)) + + # Show results + console.print(f"\n[green]✓ Updated context cache with {len(models)} models[/green]") + + # Show some interesting stats + high_context = [(k, v) for k, v in models.items() if v.get("context_length", 0) > 100000] + free_models = [(k, v) for k, v in models.items() + if v.get("pricing", {}).get("prompt", 1) == 0] + + console.print(f"\n[bold]Model Statistics:[/bold]") + console.print(f" • Total models: {len(models)}") + console.print(f" • High-context (>100k): {len(high_context)}") + console.print(f" • Free models: {len(free_models)}") + + if high_context: + console.print(f"\n[bold]Top 5 High-Context Models:[/bold]") + for model_id, info in sorted(high_context[:5], key=lambda x: x[1]["context_length"], reverse=True): + console.print(f" • {model_id}: {info['context_length']:,} tokens") + + console.print(f"\n[dim]Cache saved to: {cache_file}[/dim]") + + @cli.command() def schedule(): """Run periodic scraping based on 
SCRAPING_INTERVAL_HOURS setting.""" diff --git a/src/scrapers/service_discovery.py b/src/scrapers/service_discovery.py index 087a006..bb17545 100644 --- a/src/scrapers/service_discovery.py +++ b/src/scrapers/service_discovery.py @@ -43,6 +43,18 @@ def __init__(self): { 'url': 'https://raw.githubusercontent.com/mahseema/awesome-ai-tools/main/README.md', 'name': 'awesome-ai-tools' + }, + { + 'url': 'https://raw.githubusercontent.com/filipecalegario/awesome-generative-ai/main/README.md', + 'name': 'filipecalegario-awesome-generative-ai' + }, + { + 'url': 'https://raw.githubusercontent.com/aishwaryanr/awesome-generative-ai-guide/main/README.md', + 'name': 'awesome-generative-ai-guide' + }, + { + 'url': 'https://raw.githubusercontent.com/eudk/awesome-ai-tools/main/README.md', + 'name': 'eudk-awesome-ai-tools' } ] @@ -114,18 +126,55 @@ async def _parse_awesome_list(self, session: aiohttp.ClientSession, list_info: D def _is_likely_service(self, name: str, url: str, description: str) -> bool: """Determine if entry is likely an AI service vs documentation/article""" - # Skip common non-service entries - skip_keywords = ['tutorial', 'guide', 'paper', 'book', 'course', 'awesome', 'list', 'collection'] + # Skip common non-service entries and article-like titles + skip_keywords = ['tutorial', 'guide', 'paper', 'book', 'course', 'awesome', 'list', + 'collection', 'article', 'news', 'blog', 'post', 'story', 'report', + 'analysis', 'review', 'opinion', 'party', 'craze', 'trend', 'revolution', + 'gold rush', 'heralds', 'announcement', 'release', 'sparks', 'transform'] name_lower = name.lower() + description_lower = description.lower() + # Skip if name sounds like an article title (too many words, contains sentence-like structure) + if len(name.split()) > 5: # Service names are typically short + return False + if any(keyword in name_lower for keyword in skip_keywords): return False + + # Skip descriptions that sound like article summaries + article_indicators = ['article', 'op-ed', 
'announcement', 'examination of', 'summarizing', + 'comprehensive look', 'explores', 'discusses', 'argues'] + if any(indicator in description_lower for indicator in article_indicators): + return False - # Look for service indicators - service_indicators = ['api', 'platform', 'tool', 'model', 'generate', 'ai', 'llm', 'gpt'] + # Check URL patterns that indicate actual services + if url: + # Skip news/article URLs + if any(domain in url.lower() for domain in ['/blog/', '/news/', '/article/', + 'medium.com', 'arxiv.org', 'youtube.com']): + return False + + service_domains = ['.ai', '.io', 'api.', 'app.', 'platform.', 'cloud.', + 'huggingface.co', 'openai.com', 'anthropic.com', 'cohere.com', + 'replicate.com', 'stability.ai', 'github.com/.*api', 'github.com/.*sdk'] + if any(domain in url.lower() for domain in service_domains): + return True + + # Look for service indicators (more specific now) + service_indicators = ['api access', 'platform for', 'sdk', 'model api', 'inference', + 'endpoint', 'deployment', 'hosted', 'cloud service', 'playground', + 'provides access to', 'api for'] combined = (name + ' ' + description).lower() - return any(indicator in combined for indicator in service_indicators) + # Require at least one strong indicator + strong_indicators = ['api', 'sdk', 'platform', 'model api', 'inference service', + 'provides access to', 'api for'] + has_strong = any(indicator in combined for indicator in strong_indicators) + + # If name looks like a product name (single word or two words max) + is_product_name = len(name.split()) <= 2 + + return has_strong and is_product_name def _clean_service_name(self, name: str) -> str: """Clean and normalize service name""" @@ -137,20 +186,71 @@ def _clean_service_name(self, name: str) -> str: def _infer_category(self, name: str, description: str) -> str: """Infer service category from name and description""" + name_lower = name.lower() + desc_lower = description.lower() combined = (name + ' ' + description).lower() - 
categories = { - 'text': ['llm', 'language model', 'chat', 'text', 'gpt', 'claude', 'writing'], - 'image': ['image', 'picture', 'photo', 'art', 'dall-e', 'midjourney', 'stable diffusion'], - 'video': ['video', 'animation', 'motion', 'runway', 'pika'], - 'audio': ['audio', 'voice', 'speech', 'music', 'sound', 'tts', 'elevenlabs'], - 'code': ['code', 'copilot', 'programming', 'developer'], - 'multimodal': ['multimodal', 'vision', 'multi-modal'] - } + # Priority-based categorization with weighted keywords + # Check for most specific categories first + + # Video - very specific keywords + video_strong = ['video', 'animation', 'motion graphics', 'movie', 'film', 'footage'] + video_names = ['synthesia', 'heygen', 'runway', 'pika', 'pictory', 'fliki', + 'invideo', 'luma', 'kaiber', 'genmo', 'hour one', 'deepbrain', + 'colossyan', 'elai', 'steve.ai', 'rephrase', 'd-id'] + if any(kw in name_lower for kw in video_names): + return 'video' + if any(kw in combined for kw in video_strong): + return 'video' + + # Audio - specific audio keywords + audio_strong = ['audio', 'voice', 'speech', 'music', 'sound', 'tts', 'text-to-speech', + 'voice synthesis', 'voice clone', 'podcast', 'transcription'] + audio_names = ['elevenlabs', 'eleven labs', 'murf', 'play.ht', 'wellsaid', + 'resemble', 'descript', 'overdub', 'respeecher', 'sonantic'] + if any(kw in name_lower for kw in audio_names): + return 'audio' + if any(kw in combined for kw in audio_strong): + return 'audio' + + # Image - specific image keywords + image_strong = ['image', 'picture', 'photo', 'art', 'drawing', 'illustration', + 'graphic', 'visual', 'paint', 'design', 'artwork'] + image_names = ['dall-e', 'midjourney', 'stable diffusion', 'leonardo', 'ideogram', + 'dreamstudio', 'nightcafe', 'artbreeder', 'deep dream'] + if any(kw in name_lower for kw in image_names): + return 'image' + if any(kw in combined for kw in image_strong): + return 'image' + + # Code - programming specific + code_strong = ['code', 'programming', 
'developer', 'ide', 'compiler', 'debugger', + 'repository', 'github', 'coding assistant'] + code_names = ['copilot', 'codeium', 'cursor', 'tabnine', 'codex', 'replit'] + if any(kw in name_lower for kw in code_names): + return 'code' + if any(kw in combined for kw in code_strong): + return 'code' + + # Multimodal - handles multiple modalities + multimodal_keywords = ['multimodal', 'vision', 'multi-modal', 'image and text', + 'vision language', 'vlm', 'visual language'] + if any(kw in combined for kw in multimodal_keywords): + return 'multimodal' - for category, keywords in categories.items(): - if any(keyword in combined for keyword in keywords): - return category + # Text/LLM - language models and text generation + # Check this AFTER more specific categories to avoid false positives + text_strong = ['llm', 'language model', 'chatbot', 'chat assistant', 'gpt', + 'claude', 'writing assistant', 'text generation', 'conversation'] + text_names = ['openai', 'anthropic', 'claude', 'gpt', 'mistral', 'llama', + 'gemini', 'palm', 'character.ai', 'replika'] + # Exclude if it's clearly about text-to-X conversion + if not any(pattern in combined for pattern in ['text-to-video', 'text-to-image', + 'text-to-speech', 'text-to-audio']): + if any(kw in name_lower for kw in text_names): + return 'text' + if any(kw in combined for kw in text_strong): + return 'text' return 'general' diff --git a/src/scrapers/targeted_search_generator.py b/src/scrapers/targeted_search_generator.py index 58ae19b..9e8ea8b 100644 --- a/src/scrapers/targeted_search_generator.py +++ b/src/scrapers/targeted_search_generator.py @@ -208,6 +208,13 @@ def generate_queries(self, max_queries: int = 100, category_filter: str = None, for service_key, service_data in filtered_services.items(): prioritized_services.append((service_data, 'medium')) + # If we still have room and want more services, add non-priority ones + # This ensures we use all available services when max_queries allows + if len(prioritized_services) 
< len(filtered_services): + for service_key, service_data in filtered_services.items(): + if service_data not in [s[0] for s in prioritized_services]: + prioritized_services.append((service_data, 'normal')) + # Calculate services to process if use_all_patterns: # When using all patterns, we generate 20 queries per service @@ -247,7 +254,7 @@ def generate_queries(self, max_queries: int = 100, category_filter: str = None, if len(queries) >= max_queries: break - logger.info(f"Generated {len(queries)} targeted search queries") + logger.info(f"Generated {len(queries)} targeted search queries (max_queries={max_queries})") # Group by service for summary by_service = {} diff --git a/src/services/batch_llm_processor.py b/src/services/batch_llm_processor.py index ae95334..c661ceb 100644 --- a/src/services/batch_llm_processor.py +++ b/src/services/batch_llm_processor.py @@ -302,66 +302,70 @@ async def process_batch(self, posts: List[Dict], service_name: str, llm_processo # The LLM will naturally work with what fits in its context prompt = self.create_batch_prompt(posts, service_name) - try: - # Process with LLM - response = await llm_processor.process_raw_prompt(prompt) - - # Check if response is empty - if not response or response.strip() == "": - logger.error(f"Empty response from LLM for {service_name}") - return { - "service": service_name, - "problems": [], - "tips": [], - "cost_info": [], - "settings": [], - "error": "Empty response from LLM", - "batch_size": len(posts) - } - - # Parse response - result = json.loads(response) - - # Clean settings - ensure they are strings not dicts - if 'settings' in result: - clean_settings = [] - for setting in result['settings']: - if isinstance(setting, dict): - # Convert dict to string format - for key, value in setting.items(): - clean_settings.append(f"{key} = {value}") - elif isinstance(setting, str): - clean_settings.append(setting) - result['settings'] = clean_settings - - # Add metadata - result['batch_size'] = len(posts) - 
result['timestamp'] = datetime.now().isoformat() - result['token_count'] = prompt_tokens - - return result - - except json.JSONDecodeError as e: - logger.error(f"Failed to parse LLM response: {e}") - logger.error(f"Raw response: {response[:500]}...") - return { - "service": service_name, - "problems": [], - "optimizations": [], - "parameters": [], - "error": str(e), - "batch_size": len(posts) - } - except Exception as e: - logger.error(f"Batch processing failed: {e}") - return { - "service": service_name, - "problems": [], - "optimizations": [], - "parameters": [], - "error": str(e), - "batch_size": len(posts) - } + # Retry logic for JSON parsing failures + max_retries = 3 + for attempt in range(max_retries): + try: + # Process with LLM + response = await llm_processor.process_raw_prompt(prompt) + + # Check if response is empty + if not response or response.strip() == "": + logger.error(f"Empty response from LLM for {service_name}") + return { + "service": service_name, + "problems": [], + "tips": [], + "cost_info": [], + "settings": [], + "error": "Empty response from LLM", + "batch_size": len(posts) + } + + # Parse response + result = json.loads(response) + + # Clean settings - ensure they are strings not dicts + if 'settings' in result: + clean_settings = [] + for setting in result['settings']: + if isinstance(setting, dict): + # Convert dict to string format + for key, value in setting.items(): + clean_settings.append(f"{key} = {value}") + elif isinstance(setting, str): + clean_settings.append(setting) + result['settings'] = clean_settings + + # Add metadata + result['batch_size'] = len(posts) + result['timestamp'] = datetime.now().isoformat() + result['token_count'] = prompt_tokens + + # Success - return result + if attempt > 0: + logger.info(f"JSON parsing succeeded on attempt {attempt + 1} for {service_name}") + return result + + except json.JSONDecodeError as e: + logger.error(f"Failed to parse LLM response (attempt {attempt + 1}/{max_retries}): {e}") + 
logger.error(f"Raw response: {response[:500]}...") + + if attempt < max_retries - 1: + logger.info(f"Retrying JSON parsing for {service_name}...") + # Add a small delay before retry + await asyncio.sleep(1) + else: + logger.error(f"All {max_retries} attempts failed for {service_name}") + return { + "service": service_name, + "problems": [], + "tips": [], + "cost_info": [], + "settings": [], + "error": str(e), + "batch_size": len(posts) + } def merge_results(self, results: List[Dict]) -> Dict: """Merge multiple batch results into one"""