-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathipfs_datasets_cli.py
More file actions
executable file
·4135 lines (3660 loc) · 188 KB
/
ipfs_datasets_cli.py
File metadata and controls
executable file
·4135 lines (3660 loc) · 188 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
IPFS Datasets CLI Tool
A lightweight command line interface that provides convenient access to the MCP tools
with minimal imports. Only loads what's needed when needed.
"""
import sys
import os
import subprocess
import json
from pathlib import Path
# Install CLI error handler early, before any command dispatch, so failures
# are routed through the project's error reporter when it is importable.
try:
    from ipfs_datasets_py.error_reporting.cli_error_reporter import install_cli_error_handler
    install_cli_error_handler()
except ImportError:
    # Error reporting not available, continue without it
    pass
# ============================================================================
# Dynamic Tool Discovery and Execution (from enhanced_cli.py)
# ============================================================================
class DynamicToolRunner:
    """Dynamically discover and run MCP tools.

    Tools are plain Python modules found on disk under
    ``ipfs_datasets_py/mcp_server/tools/<category>/<tool>.py`` (relative to
    this file). Discovery happens eagerly in ``__init__``; execution imports
    the module lazily in ``run_tool``.
    """

    def __init__(self):
        # Root of the on-disk tool tree, resolved relative to this script.
        self.tools_dir = Path(__file__).parent / "ipfs_datasets_py" / "mcp_server" / "tools"
        # Mapping: {category_name: {tool_name: dotted module path}}
        self.discovered_tools = {}
        self.discover_tools()

    def discover_tools(self):
        """Discover all available tools.

        Populates ``self.discovered_tools`` by walking one level of category
        directories and collecting their ``*.py`` files. Silently a no-op if
        the tools directory does not exist (e.g. running outside the repo).
        """
        if not self.tools_dir.exists():
            return
        for category_dir in self.tools_dir.iterdir():
            # Skip files and private/dunder directories (e.g. __pycache__).
            if not category_dir.is_dir() or category_dir.name.startswith('_'):
                continue
            category_name = category_dir.name
            self.discovered_tools[category_name] = {}
            # Look for Python files in the category
            for tool_file in category_dir.glob("*.py"):
                if tool_file.name.startswith('_') or tool_file.name == "__init__.py":
                    continue
                tool_name = tool_file.stem
                module_path = f"ipfs_datasets_py.mcp_server.tools.{category_name}.{tool_name}"
                self.discovered_tools[category_name][tool_name] = module_path

    def get_categories(self):
        """Get list of available tool categories (sorted)."""
        return sorted(self.discovered_tools.keys())

    def get_tools(self, category):
        """Get list of tools in a category (sorted; empty for unknown category)."""
        return sorted(self.discovered_tools.get(category, {}).keys())

    def get_tool_count(self, category):
        """Get count of tools in a category (0 for unknown category)."""
        return len(self.discovered_tools.get(category, {}))

    async def run_tool(self, category, tool, **kwargs):
        """Run a specific tool.

        Imports the tool module, resolves the target callable, validates
        ``kwargs`` against its signature, and invokes it (awaiting if it is a
        coroutine function). Never raises for expected failure modes: every
        error path returns ``{"status": "error", "error": ...}`` and success
        always returns a dict.
        """
        import importlib
        import inspect
        if category not in self.discovered_tools:
            return {"status": "error", "error": f"Category '{category}' not found"}
        if tool not in self.discovered_tools[category]:
            return {"status": "error", "error": f"Tool '{tool}' not found in category '{category}'"}
        module_path = self.discovered_tools[category][tool]
        try:
            # Import the module
            module = importlib.import_module(module_path)
            # Find callable functions in the module.
            functions = []
            for name, obj in inspect.getmembers(module):
                if inspect.isfunction(obj) and not name.startswith('_'):
                    functions.append((name, obj))
            if not functions:
                return {"status": "error", "error": f"No callable functions found in {module_path}"}
            # Resolve deterministically: exact match first, then <tool>_tool.
            functions_by_name = {name: fn for name, fn in functions}
            target_name = None
            if tool in functions_by_name:
                target_name = tool
            elif f"{tool}_tool" in functions_by_name:
                target_name = f"{tool}_tool"
            elif len(functions) == 1:
                # Backward-compatible fallback for single-function modules only.
                target_name = functions[0][0]
            else:
                available = sorted(functions_by_name.keys())
                return {
                    "status": "error",
                    "error": (
                        f"Ambiguous callable selection for tool '{tool}' in {module_path}. "
                        f"Available callables: {available}. "
                        "Expected an exact callable named '<tool>' or '<tool>_tool'."
                    ),
                }
            target_function = functions_by_name[target_name]
            # Get function signature
            sig = inspect.signature(target_function)
            # Validate kwargs before invocation to avoid silent argument drops.
            supports_var_kwargs = any(
                p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
            )
            if supports_var_kwargs:
                # **kwargs target: pass everything through untouched.
                filtered_kwargs = dict(kwargs)
            else:
                # Only parameters that can legally be passed by keyword.
                allowed = {
                    name
                    for name, p in sig.parameters.items()
                    if p.kind in (
                        inspect.Parameter.POSITIONAL_OR_KEYWORD,
                        inspect.Parameter.KEYWORD_ONLY,
                    )
                }
                unknown = sorted(set(kwargs.keys()) - allowed)
                if unknown:
                    return {
                        "status": "error",
                        "error": (
                            f"Unexpected arguments for callable '{target_name}': {unknown}. "
                            f"Allowed arguments: {sorted(allowed)}"
                        ),
                    }
                filtered_kwargs = {k: v for k, v in kwargs.items() if k in allowed}
            # Report all missing required (no-default) parameters up front.
            missing_required = []
            for name, p in sig.parameters.items():
                if p.kind not in (
                    inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    inspect.Parameter.KEYWORD_ONLY,
                ):
                    continue
                if p.default is inspect.Parameter.empty and name not in filtered_kwargs:
                    missing_required.append(name)
            if missing_required:
                return {
                    "status": "error",
                    "error": (
                        f"Missing required arguments for callable '{target_name}': "
                        f"{sorted(missing_required)}"
                    ),
                }
            # Call the function
            import asyncio
            if asyncio.iscoroutinefunction(target_function):
                result = await target_function(**filtered_kwargs)
            else:
                result = target_function(**filtered_kwargs)
            # Ensure result is a dict
            if not isinstance(result, dict):
                result = {"status": "success", "result": str(result)}
            return result
        except ImportError as e:
            return {"status": "error", "error": f"Failed to import {module_path}: {e}"}
        except Exception as e:
            return {"status": "error", "error": f"Failed to run tool: {e}"}
def print_result(result, format_type="pretty"):
    """Render a tool-result dict for the terminal.

    Args:
        result: Tool response dict; ``result["status"] == "success"`` selects
            the success rendering, anything else the error rendering.
        format_type: ``"json"`` dumps the raw dict; any other value (default
            ``"pretty"``) prints a human-readable summary.
    """
    if format_type == "json":
        print(json.dumps(result, indent=2))
        return

    # Error path first (guard clause): anything not explicitly successful.
    if result.get("status") != "success":
        print("❌ Error!")
        if "error" in result:
            print(f"Error: {result['error']}")
        if "message" in result:
            print(f"Message: {result['message']}")
        return

    print("✅ Success!")
    if "message" in result:
        print(f"Message: {result['message']}")
    if "dataset_id" in result:
        print(f"Dataset ID: {result['dataset_id']}")
    if "summary" in result:
        summary = result["summary"]
        if isinstance(summary, dict):
            print("Summary:")
            for key, value in summary.items():
                print(f" {key}: {value}")
        else:
            print(f"Summary: {summary}")
    # Avoid printing the same text twice when result mirrors message.
    if "result" in result and result["result"] != result.get("message"):
        print(f"Result: {result['result']}")
def parse_tool_args(args_list):
    """Parse ``--key value`` / ``--flag`` tokens into a kwargs dict.

    Values are decoded as JSON when possible (so ``"1"`` -> 1, ``"true"`` ->
    True, ``'{"x":2}'`` -> dict) and kept as strings otherwise. A ``--key``
    followed by another ``--option`` (or nothing) becomes a boolean True
    flag. Tokens that do not start with ``--`` are ignored.

    Args:
        args_list: Raw command-line tokens (e.g. ``sys.argv`` tail).

    Returns:
        dict mapping option names (without the ``--`` prefix) to values.
    """
    kwargs = {}
    pos = 0
    total = len(args_list)
    while pos < total:
        token = args_list[pos]
        if not token.startswith('--'):
            # Positional tokens are handled elsewhere; skip them here.
            pos += 1
            continue
        key = token[2:]
        has_value = pos + 1 < total and not args_list[pos + 1].startswith('--')
        if not has_value:
            kwargs[key] = True
            pos += 1
            continue
        raw = args_list[pos + 1]
        # Prefer typed JSON values; fall back to the raw string.
        try:
            kwargs[key] = json.loads(raw)
        except json.JSONDecodeError:
            kwargs[key] = raw
        pos += 2
    return kwargs
# ============================================================================
# Original CLI Functions
# ============================================================================
def show_help():
    """Show CLI help without importing anything heavy."""
    # Static, hand-maintained help text: printing it must stay cheap, so the
    # full command tree is spelled out here instead of being generated from
    # the (heavy) command implementations.
    help_text = """
ipfs-datasets-cli - IPFS Datasets CLI Tool
Usage:
ipfs-datasets [command] [subcommand] [options]
ipfs-datasets --help
ipfs-datasets --version
Commands:
info System information
status Show system status
version Show version information
defaults Show resolved host/port/gateway defaults
save-defaults Persist host/port/gateway to config
mcp MCP server management
start Start MCP server
stop Stop MCP server
status Show MCP server status
logs Tail MCP dashboard logs
tools Tool management
categories List available tool categories (all discovered tools)
list List tools in a category
execute Execute a specific tool
run Run a tool directly: tools run <category> <tool> [--arg value ...]
vscode VSCode CLI management
status Show VSCode CLI installation status
install Install or update VSCode CLI
auth Configure authentication (GitHub/Microsoft)
install-with-auth Install and authenticate in one step
execute Execute VSCode CLI command
extensions List, install, or uninstall extensions
tunnel Manage VSCode tunnel functionality
github GitHub CLI management
status Show GitHub CLI installation status
install Install or update GitHub CLI
auth Manage GitHub authentication
execute Execute GitHub CLI command
legal-pdf Legal PDF rendering helpers
render Render state-court filings and exhibit binder components
merge Merge PDF bundles and count pages
copilot GitHub Copilot CLI management
status Show Copilot CLI installation status
install Install or update Copilot CLI extension
explain Get AI explanation for code
suggest Get command suggestions from natural language
git Get Git command suggestions
gemini Google Gemini CLI management
status Show Gemini CLI installation status
install Install Gemini CLI
config Configure API key and settings
execute Execute Gemini CLI command
claude Anthropic Claude CLI management
status Show Claude CLI installation status
install Install Claude CLI
config Configure API key and settings
execute Execute Claude CLI command
p2p P2P workflow scheduling (bypass GitHub API)
init Initialize P2P scheduler
schedule Schedule a workflow for P2P execution
next Get next workflow from queue
status Get scheduler status
add-peer Add a peer to the network
remove-peer Remove a peer from the network
tags List available workflow tags
dataset Dataset operations
load Load a dataset
convert Convert dataset format
ipfs IPFS operations
pin Pin data to IPFS
get Get data from IPFS (supports --gateway)
vector Vector operations
create Create vector embeddings
search Search vectors
graph Knowledge graph operations
create Initialize a knowledge graph database
add-entity Add an entity to the graph
add-rel Add a relationship between entities
query Execute a Cypher query
search Hybrid search (semantic + keyword)
tx-begin Begin a transaction
tx-commit Commit a transaction
tx-rollback Rollback a transaction
index Create an index
constraint Add a constraint
legal Legal dataset shortcuts
search-court-rules Search federal/state court rules via legal dataset tools
search-federal-register Search Federal Register corpus via legal dataset tools
scrape-netherlands-laws Scrape Netherlands laws from official Dutch government sources
finance Financial analysis and data pipelines
stock Fetch stock market data
news Scrape financial news (AP, Reuters, Bloomberg)
executives Analyze executive performance (hypothesis testing)
embeddings Multimodal embedding analysis (text + images)
theorems List or apply financial theorems
workflow Execute end-to-end workflow pipelines
discord Discord data export and analysis
guilds List accessible Discord servers
channels List channels in a server
dms List direct message channels
export Export a Discord channel
export-guild Export entire Discord server
export-dms Export all direct messages (native exportdm)
export-all Export all accessible content
analyze Analyze a Discord channel
analyze-export Analyze exported Discord data
status Check Discord integration status
install Install DiscordChatExporter
email Email ingestion and analysis
test Test email server connection
folders List IMAP mailbox folders
export Export emails from a folder
parse Parse an .eml file
fetch Fetch emails (no export)
analyze Analyze an email export file
search Search emails in an export file
google-voice Parse Google Voice Takeout exports
google-voice-vault Parse Google Workspace Vault Voice exports
google-voice-data-export Parse Google Workspace Data Export Voice bundles and gs:// sources
google-voice-watch Watch a local folder and auto-hydrate Voice exports
google-voice-takeout-url Build a custom consumer Google Takeout URL for Voice exports
google-voice-takeout-open Open the custom Takeout URL in Playwright
google-voice-takeout-capture Open the custom Takeout URL and wait for an archive download
google-voice-takeout-source Save Takeout page source HTML for data-id inference
google-voice-takeout-poll Poll a local download directory for a completed Takeout archive
google-voice-takeout-drive Poll Google Drive for a Takeout artifact and optionally download it
google-voice-takeout-status Summarize a saved Takeout acquisition manifest
google-voice-takeout-doctor Diagnose a saved Takeout acquisition manifest and suggest the next step
google-voice-takeout-history List archived snapshot history for a Takeout acquisition manifest
google-voice-takeout-prune Prune old archived snapshot history for a Takeout acquisition manifest
google-voice-takeout-case-summary Show a concise summary for a Takeout case/download directory or manifest
google-voice-takeout-case-report Export a markdown or HTML report for a Takeout case/download directory or manifest
google-voice-takeout-case-bundle Collect the latest manifest, history snapshots, and case reports into one archival folder
workspace Inspect, search, export, and package workspace dataset bundles
--action summary Read a lightweight workspace bundle summary
--action inspect Read bundle sections and artifact counts
--action load Load the full dataset-shaped bundle payload
--action report Render a human-readable markdown/text bundle report
--action search-bm25 Search a workspace bundle with grouped BM25 results
--action search-vector Search a workspace bundle with grouped vector results
--action export Export a workspace bundle from generic or source-specific inputs
--action package Package a workspace bundle into chain-loadable artifacts
--action package-search-bm25 Search a packaged workspace bundle with grouped BM25 results
--action package-search-vector Search a packaged workspace bundle with grouped vector results
history-index Search persisted DuckDB history/GraphRAG index
docket Import a docket into a reusable dataset artifact
chunks Search chunk text and metadata
documents Search indexed documents
entities Search extracted entities
relationships Search extracted relationships
detect-type File type detection for GraphRAG
detect Detect single file type
batch Batch detect multiple files
methods List available detection methods
Options:
--help, -h Show this help message
--version Show version information
--json Output in JSON format
--verbose Verbose output
--config Path to CLI config JSON (overrides default)
--host, -H Override dashboard host
--port, -p Override dashboard port
--gateway, -g Override IPFS HTTP gateway (ipfs get)
Environment:
IPFS_DATASETS_HOST Default dashboard host (e.g., 127.0.0.1)
IPFS_DATASETS_PORT Default dashboard port (e.g., 8899)
IPFS_DATASETS_CLI_CONFIG Path to CLI config JSON
IPFS_HTTP_GATEWAY Default IPFS gateway (e.g., https://ipfs.io)
IPFS_DATASETS_IPFS_GATEWAY Alternate env var for gateway
Config:
~/.ipfs_datasets/cli.json Optional defaults for host/port, e.g.:
{"host": "127.0.0.1", "port": "8899", "gateway": "https://ipfs.io"}
Precedence: flags > env > config file > hardcoded defaults
Examples:
ipfs-datasets info status
ipfs-datasets info defaults --json
ipfs-datasets ipfs get QmHash --gateway https://ipfs.io --out /tmp/file
ipfs-datasets mcp start
ipfs-datasets tools categories
ipfs-datasets dataset load ./data.json
ipfs-datasets graph create --driver-url ipfs://localhost:5001
ipfs-datasets graph add-entity --id person1 --type Person --props '{"name":"Alice"}'
ipfs-datasets graph query --cypher "MATCH (n) RETURN n LIMIT 10"
ipfs-datasets legal search-court-rules --collection_name court_rules --query_vector "[0.1,0.2,0.3]" --jurisdiction both
ipfs-datasets legal search-court-rules --collection_name court_rules --query_text "rules for filing motions" --jurisdiction federal
ipfs-datasets legal search-federal-register --collection_name federal_register_docs --query_text "EPA emissions reporting rule"
ipfs-datasets legal scrape-netherlands-laws --document_urls '["https://wetten.overheid.nl/BWBR0001854/"]'
For detailed help on a specific command:
ipfs-datasets [command] --help
"""
    print(help_text.strip())
def show_version():
    """Print the CLI version string (kept import-free so ``--version`` is fast)."""
    version_line = "ipfs-datasets CLI v1.0.0"
    print(version_line)
def show_status():
    """Print a minimal status summary without any heavy imports."""
    static_lines = (
        "System Status: CLI tool is available",
        "Version: 1.0.0",
    )
    for line in static_lines:
        print(line)
    # Dynamic facts: interpreter version and the directory this script lives in.
    print("Python:", sys.version.split()[0])
    print("Path:", Path(__file__).parent)
def _load_cli_config(config_override: str | None = None) -> dict:
"""Load optional CLI config from ~/.ipfs_datasets/cli.json.
Returns empty dict if not present or invalid.
"""
try:
if config_override:
cfg_path = Path(config_override)
else:
env_cfg = os.environ.get("IPFS_DATASETS_CLI_CONFIG")
cfg_path = Path(env_cfg) if env_cfg else (Path.home() / ".ipfs_datasets" / "cli.json")
if not cfg_path.exists():
return {}
data = json.loads(cfg_path.read_text())
if isinstance(data, dict):
return data
except Exception:
pass
return {}
def _default_host_port(config_override: str | None = None):
    """Resolve default dashboard host/port.

    Precedence: environment variables (``IPFS_DATASETS_HOST`` /
    ``IPFS_DATASETS_PORT``) > config file > hardcoded defaults
    (``127.0.0.1:8899``).

    Returns:
        ``(host, port)`` tuple of strings.
    """
    cfg = _load_cli_config(config_override)
    resolved_host = os.environ.get("IPFS_DATASETS_HOST") or cfg.get("host") or "127.0.0.1"
    resolved_port = os.environ.get("IPFS_DATASETS_PORT") or cfg.get("port") or "8899"
    # Normalize both to str so a numeric port from the config file is safe.
    return str(resolved_host), str(resolved_port)
def _default_gateway(config_override: str | None = None, override: str | None = None) -> str | None:
"""Resolve default IPFS HTTP gateway.
Precedence: explicit override > env (IPFS_HTTP_GATEWAY or IPFS_DATASETS_IPFS_GATEWAY) > config > None.
"""
if override:
return override
gw_env = os.environ.get("IPFS_HTTP_GATEWAY") or os.environ.get("IPFS_DATASETS_IPFS_GATEWAY")
if gw_env:
return gw_env
cfg = _load_cli_config(config_override)
if isinstance(cfg, dict):
gw = cfg.get("gateway")
if gw:
return gw
return None
def execute_heavy_command(args):
"""Execute commands that require heavy imports - only import when needed."""
# Only import heavy modules when actually executing commands
try:
import anyio
import json
import importlib
from typing import Any, Dict, List
from pathlib import Path as PathLib
# Setup sys path for imports
current_dir = PathLib(__file__).parent
if str(current_dir) not in sys.path:
sys.path.insert(0, str(current_dir))
# Detect global JSON flag and strip it from args
json_output = False
if '--json' in args:
json_output = True
args = [a for a in args if a != '--json']
# Heavy command execution logic here
command = args[0] if args else None
if command == "alerts":
from ipfs_datasets_py.alerts.cli import main as alerts_main
alerts_args = list(args[1:])
if json_output:
alerts_args = ["--json", *alerts_args]
sys.exit(alerts_main(alerts_args))
if command == "workflow-automation":
from ipfs_datasets_py.workflow_automation.cli import main as wa_main
wa_args = list(args[1:])
if json_output:
wa_args = ["--json", *wa_args]
sys.exit(wa_main(wa_args))
if command == "search":
from ipfs_datasets_py.search.cli import main as search_main
search_args = list(args[1:])
if json_output:
search_args = ["--json", *search_args]
sys.exit(search_main(search_args))
if command == "p2p-networking":
from ipfs_datasets_py.p2p_networking.cli import main as p2p_net_main
p2p_args = list(args[1:])
if json_output:
p2p_args = ["--json", *p2p_args]
sys.exit(p2p_net_main(p2p_args))
if command == "logic":
from ipfs_datasets_py.logic.cli import main as logic_main
logic_args = list(args[1:])
if json_output:
logic_args = ["--json", *logic_args]
sys.exit(logic_main(logic_args))
if command == "legal":
subcommand = args[1] if len(args) > 1 else None
if subcommand in ("search-court-rules", "court-rules-search", "search_court_rules"):
tool_args = args[2:]
parameters = parse_tool_args(tool_args)
parameters = {str(k).replace("-", "_"): v for k, v in parameters.items()}
if not parameters.get("collection_name"):
print("Usage: ipfs-datasets legal search-court-rules --collection_name NAME (--query_vector '[...]' | --query_text '...') [--jurisdiction federal|state|both] [--state OR]")
return
if "query_vector" not in parameters:
query_text = str(parameters.get("query_text") or "").strip()
if not query_text:
print("Usage: ipfs-datasets legal search-court-rules --collection_name NAME (--query_vector '[...]' | --query_text '...') [--jurisdiction federal|state|both] [--state OR]")
return
try:
from ipfs_datasets_py.embeddings_router import embed_text
embedding_model = (
str(parameters.get("embedding_model") or parameters.get("model_name") or "").strip()
or "thenlper/gte-small"
)
embedding_provider = str(parameters.get("embedding_provider") or "").strip() or None
query_vector = embed_text(
query_text,
model_name=embedding_model,
provider=embedding_provider,
)
parameters["query_vector"] = query_vector
except Exception as e:
err = {
"status": "error",
"error": f"Failed to generate query embedding from query_text: {e}",
}
if json_output:
print(json.dumps(err))
else:
print(json.dumps(err, indent=2))
return
if not parameters.get("jurisdiction"):
parameters["jurisdiction"] = "both"
try:
from ipfs_datasets_py.processors.legal_scrapers.legal_dataset_api import (
search_court_rules_corpus_from_parameters,
)
result = anyio.run(search_court_rules_corpus_from_parameters, parameters)
if json_output:
print(json.dumps(result))
else:
print(json.dumps(result, indent=2))
except Exception as e:
err = {"status": "error", "error": str(e)}
if json_output:
print(json.dumps(err))
else:
print(json.dumps(err, indent=2))
return
if subcommand in (
"search-federal-register",
"federal-register-search",
"search_federal_register",
):
tool_args = args[2:]
parameters = parse_tool_args(tool_args)
parameters = {str(k).replace("-", "_"): v for k, v in parameters.items()}
if not parameters.get("collection_name"):
print(
"Usage: ipfs-datasets legal search-federal-register --collection_name NAME (--query_vector '[...]' | --query_text '...')"
)
return
if "query_vector" not in parameters:
query_text = str(parameters.get("query_text") or "").strip()
if not query_text:
print(
"Usage: ipfs-datasets legal search-federal-register --collection_name NAME (--query_vector '[...]' | --query_text '...')"
)
return
try:
from ipfs_datasets_py.embeddings_router import embed_text
embedding_model = (
str(parameters.get("embedding_model") or parameters.get("model_name") or "").strip()
or "thenlper/gte-small"
)
embedding_provider = str(parameters.get("embedding_provider") or "").strip() or None
query_vector = embed_text(
query_text,
model_name=embedding_model,
provider=embedding_provider,
)
parameters["query_vector"] = query_vector
except Exception as e:
err = {
"status": "error",
"error": f"Failed to generate query embedding from query_text: {e}",
}
if json_output:
print(json.dumps(err))
else:
print(json.dumps(err, indent=2))
return
try:
from ipfs_datasets_py.processors.legal_scrapers.legal_dataset_api import (
search_federal_register_corpus_from_parameters,
)
result = anyio.run(search_federal_register_corpus_from_parameters, parameters)
if json_output:
print(json.dumps(result))
else:
print(json.dumps(result, indent=2))
except Exception as e:
err = {"status": "error", "error": str(e)}
if json_output:
print(json.dumps(err))
else:
print(json.dumps(err, indent=2))
return
if subcommand in (
"scrape-netherlands-laws",
"netherlands-laws-scrape",
"scrape_netherlands_laws",
):
tool_args = args[2:]
parameters = parse_tool_args(tool_args)
parameters = {str(k).replace("-", "_"): v for k, v in parameters.items()}
if (
not parameters.get("document_urls")
and not parameters.get("seed_urls")
and not parameters.get("use_default_seeds")
):
print(
"Usage: ipfs-datasets legal scrape-netherlands-laws (--document_urls '[\"https://wetten.overheid.nl/BWBR...\"]' | --seed_urls '[\"https://wetten.overheid.nl/zoeken/zoekresultaat/...\" ]' | --use_default_seeds true) [--output_dir PATH] [--max_documents N] [--max_seed_pages N] [--crawl_depth N] [--rate_limit_delay SECONDS] [--skip_existing true]"
)
return
try:
from ipfs_datasets_py.processors.legal_scrapers.legal_dataset_api import (
scrape_netherlands_laws_from_parameters,
)
result = anyio.run(scrape_netherlands_laws_from_parameters, parameters)
if json_output:
print(json.dumps(result))
else:
print(json.dumps(result, indent=2))
except Exception as e:
err = {"status": "error", "error": str(e)}
if json_output:
print(json.dumps(err))
else:
print(json.dumps(err, indent=2))
return
print("Usage: ipfs-datasets legal search-court-rules --collection_name NAME (--query_vector '[...]' | --query_text '...') [--jurisdiction federal|state|both] [--state OR]")
print(" ipfs-datasets legal search-federal-register --collection_name NAME (--query_vector '[...]' | --query_text '...')")
print(" ipfs-datasets legal scrape-netherlands-laws (--document_urls '[\"https://wetten.overheid.nl/BWBR...\"]' | --seed_urls '[\"https://wetten.overheid.nl/zoeken/zoekresultaat/...\" ]' | --use_default_seeds true)")
return
if command == "tools":
subcommand = args[1] if len(args) > 1 else None
# parse global option --config first
config_override = None
if "--config" in args:
try:
idx = args.index("--config")
if idx + 1 < len(args):
config_override = args[idx + 1]
args = args[:idx] + args[idx+2:]
except Exception:
pass
host, port = _default_host_port(config_override)
# Handle enhanced "tools run" command
if subcommand == "run":
try:
import anyio
runner = DynamicToolRunner()
# Parse: tools run <category> <tool> [--arg value ...]
if len(args) < 4:
print("Usage: ipfs-datasets tools run <category> <tool> [--arg value ...]")
print("\nExamples:")
print(" ipfs-datasets tools run dataset_tools load_dataset --source squad")
print(" ipfs-datasets tools run ipfs_tools get_from_ipfs --cid QmHash123")
print(" ipfs-datasets tools run vector_tools create_vector_index --data 'text'")
return
category = args[2]
tool = args[3]
tool_args = args[4:]
# Parse tool arguments
kwargs = parse_tool_args(tool_args)
# Run the tool
result = anyio.run(runner.run_tool, category, tool, **kwargs)
# Print result
print_result(result, "json" if json_output else "pretty")
return
except Exception as e:
print(f"Error running tool: {e}")
import traceback
traceback.print_exc()
return
# Parse common options for tools
extra = args[2:]
i = 0
params_json = None
category_arg = None
tool_arg = None
while i < len(extra):
token = extra[i]
if token in ("--host", "-H") and i + 1 < len(extra):
host = str(extra[i + 1])
i += 2
elif token in ("--port", "-p") and i + 1 < len(extra):
port = str(extra[i + 1])
i += 2
elif token in ("--params", "-d") and i + 1 < len(extra):
params_json = extra[i + 1]
i += 2
else:
# Positional capture for category and tool for list/execute
if category_arg is None:
category_arg = token
elif tool_arg is None:
tool_arg = token
i += 1
base = f"http://{host}:{port}/api/mcp"
try:
import requests
if subcommand == "categories":
# Use enhanced discovery if MCP server is not running
try:
r = requests.get(f"{base}/tools", timeout=3)
r.raise_for_status()
data = r.json()
cats = sorted(list(data.keys())) if isinstance(data, dict) else []
except Exception:
# Fallback to local discovery
runner = DynamicToolRunner()
cats = runner.get_categories()
if json_output:
print(json.dumps({"categories": cats}))
else:
print("Available tool categories:")
runner = DynamicToolRunner()
for c in cats:
count = runner.get_tool_count(c)
print(f" {c} ({count} tools)")
return
elif subcommand == "list":
if not category_arg:
print("Usage: ipfs-datasets tools list <category> [--host H --port P]")
return
# Try MCP server first, fallback to local discovery
try:
r = requests.get(f"{base}/tools", timeout=3)
r.raise_for_status()
data = r.json()
tools = data.get(category_arg, []) if isinstance(data, dict) else []
except Exception:
# Fallback to local discovery
runner = DynamicToolRunner()
tool_names = runner.get_tools(category_arg)
tools = [{"name": name} for name in tool_names]
if not tools:
print(f"No tools found for category '{category_arg}'")
return
if json_output:
print(json.dumps({"category": category_arg, "tools": tools}))
else:
print(f"Tools in '{category_arg}':")
for t in tools:
if isinstance(t, dict):
name = t.get("name", "unknown")
desc = t.get("description", "")
print(f" {name}: {desc}" if desc else f" {name}")
else:
print(f" {t}")
return
elif subcommand == "describe":
if not category_arg or not tool_arg:
print("Usage: ipfs-datasets tools describe <category> <tool> [--host H --port P]")
return
url = f"{base}/tools/{category_arg}/{tool_arg}"
r = requests.get(url, timeout=5)
if r.ok:
if json_output:
print(r.text)
else:
try:
print(json.dumps(r.json(), indent=2))
except Exception:
print(r.text)
else:
print(f"HTTP {r.status_code}: {r.text[:200]}")
return
elif subcommand == "execute":
if not category_arg or not tool_arg:
print("Usage: ipfs-datasets tools execute <category> <tool> [--params JSON] [--host H --port P]")
return
try:
body = json.loads(params_json) if params_json else {}
except Exception as e:
print(f"Invalid JSON for --params: {e}")
return
url = f"{base}/tools/{category_arg}/{tool_arg}/execute"
r = requests.post(url, json=body, timeout=15)
if r.ok:
if json_output:
print(r.text)
else:
try:
print(json.dumps(r.json(), indent=2))
except Exception:
print(r.text)
else:
print(f"HTTP {r.status_code}: {r.text[:200]}")
return
except Exception as e:
print(f"Tools command failed: {e}")
return
if command == "ipfs":
subcommand = args[1] if len(args) > 1 else None
config_override = None
if "--config" in args:
try:
idx = args.index("--config")
if idx + 1 < len(args):
config_override = args[idx + 1]
args = args[:idx] + args[idx+2:]
except Exception:
pass
host, port = _default_host_port(config_override)
extra = args[2:]
i = 0
out_path = None
path_arg = None
cid_arg = None
gateway = None
# load default gateway from env or config
if not gateway:
gw_env = os.environ.get("IPFS_HTTP_GATEWAY") or os.environ.get("IPFS_DATASETS_IPFS_GATEWAY")
if gw_env:
gateway = gw_env
else:
cfg = _load_cli_config(config_override)
gateway = cfg.get("gateway") if isinstance(cfg, dict) else None
while i < len(extra):
token = extra[i]
if token in ("--host", "-H") and i + 1 < len(extra):
host = str(extra[i + 1])
i += 2
elif token in ("--port", "-p") and i + 1 < len(extra):
port = str(extra[i + 1])
i += 2
elif token in ("--out", "-o") and i + 1 < len(extra):
out_path = extra[i + 1]
i += 2
elif token in ("--gateway", "-g") and i + 1 < len(extra):
gateway = extra[i + 1]
i += 2
else:
# positional capture
if cid_arg is None and subcommand == "get":
cid_arg = token
elif path_arg is None and subcommand == "pin":
path_arg = token
i += 1
base = f"http://{host}:{port}/api/mcp/tools"
try:
import requests
if subcommand == "get":
if not cid_arg:
print("Usage: ipfs-datasets ipfs get <cid> [--out PATH] [--host H --port P]")
return
url = f"{base}/ipfs_tools/get_from_ipfs/execute"
body = {"cid": cid_arg}
if out_path:
body["output_path"] = out_path
if gateway:
body["gateway"] = gateway
r = requests.post(url, json=body, timeout=30)
if r.ok:
if json_output:
print(r.text)
else:
try:
print(json.dumps(r.json(), indent=2))
except Exception:
print(r.text)
else:
print(f"HTTP {r.status_code}: {r.text[:200]}")