Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
01518e2
Times extraction 1
basiav May 8, 2025
19637c2
updates:
basiav Jun 2, 2025
e1f2200
Working
basiav Jun 4, 2025
6e24d59
Second
basiav Jun 4, 2025
b0e69c2
Added
basiav Jun 5, 2025
2beea10
Works
basiav Jun 5, 2025
105a8c7
Done
basiav Jun 5, 2025
c11bdc3
All
basiav Jun 5, 2025
dce3d15
Changes in sampleset_metadata
basiav Jun 11, 2025
f78dbeb
Comm
basiav Jun 11, 2025
c027409
update
basiav Jun 11, 2025
6ce6ea2
Qommunity demo notebook update
kacper3615 Jun 12, 2025
cd6a224
No default value for AdvantageSampler
kacper3615 Jun 12, 2025
6a16a14
Updated requirements
kacper3615 Jun 12, 2025
e89de6d
Added
basiav Jun 12, 2025
20b8297
Updates
basiav Jun 26, 2025
f21b3a5
Updates
basiav Jul 1, 2025
1f73a2f
First version to push
basiav Jul 2, 2025
ba4b2d9
Updates
basiav Jul 3, 2025
f27e5b4
Added time units, find_embedding time capturing for heuristic embedding
basiav Jul 5, 2025
f8ea3aa
Delete Qommunity/results_analyzer.py
basiav Jul 5, 2025
da47995
Fixed hierarchical_searcher error
kacper3615 Jul 15, 2025
51faac7
Full modularity matrix will be calculated only once
kacper3615 Jul 15, 2025
334096d
PR suggestions applied
basiav Jul 16, 2025
c1a846b
Merge branch 'main' of https://github.com/kacper3615/Qommunity into b…
basiav Jul 21, 2025
059957c
Fix with return_metadata in HierarchicalSearcher
basiav Jul 21, 2025
db80fde
Update in the demo/sampleset_times_extraction/iterative_searcher_samp…
basiav Jul 21, 2025
7d4f4a9
Applied Black formatter
kacper3615 Nov 2, 2025
007e5c6
Progress
basiav Nov 12, 2025
04aadef
Updates:
basiav Nov 12, 2025
a7e9485
Saving plots
basiav Nov 24, 2025
ff42031
Updates
basiav Nov 25, 2025
6e2b8bc
Updates
basiav Dec 1, 2025
47e544f
Update
basiav Dec 1, 2025
de065ef
All
basiav Dec 1, 2025
ed6d0d3
Safety copy
basiav Dec 1, 2025
2eae861
Sampleset info gathering prototype working
basiav Dec 5, 2025
3f56f08
Rename powerlaw_169.ipynb to erdos_renyi_169.ipynb
basiav Dec 5, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
113 changes: 99 additions & 14 deletions Qommunity/iterative_searcher/iterative_hierarchical_searcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,12 @@
from tqdm import tqdm
import numpy as np
import warnings
import pickle

from Qommunity.samplers.hierarchical.advantage_sampler import AdvantageSampler
from Qommunity.searchers.utils import HierarchicalRunMetadata

METADATA_KEYARG = "return_metadata"


class MethodArgsWarning(Warning):
Expand Down Expand Up @@ -53,17 +59,29 @@ def _verify_kwargs(self, kwargs) -> dict:

return kwargs

def _check_sampler_and_it_searcher_metadata_flags_compatibility(
    self, return_metadata_flag: bool
) -> bool:
    """Check that the sampler and this searcher agree on metadata return.

    Metadata can only flow through when the underlying sampler was
    constructed with ``return_metadata`` enabled AND the caller asked
    this searcher for it.

    Args:
        return_metadata_flag: the searcher-level request for metadata.

    Returns:
        True only when both the sampler flag and the searcher flag are set.
    """
    sampler_wants_metadata = self.sampler.return_metadata
    return sampler_wants_metadata and return_metadata_flag

def run(
self,
num_runs: int,
save_results: bool = True,
saving_path: str | None = None,
elapse_times: bool = True,
iterative_verbosity: int = 0,
return_metadata: bool = False,
**kwargs,
):
kwargs = self._verify_kwargs(kwargs)

if return_metadata and not self.sampler.return_metadata:
raise MethodArgsWarning(
f"Set Advantage sampler's {METADATA_KEYARG} flag to True before running."
+ f" HierarchicalIterativeSearcher with {METADATA_KEYARG}."
)

if iterative_verbosity >= 1:
print("Starting community detection iterations")

Expand All @@ -74,11 +92,19 @@ def run(
communities = np.empty((num_runs), dtype=object)
times = np.zeros((num_runs))

# List instead of samplesets_data = np.empty((num_runs), dtype=object)
# To prevent jupyter notebook kernel crashes
# as handling big objects is not efficient with numpy dtype=object arrs
samplesets_data = []
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it a temporary solution?

Copy link
Copy Markdown
Collaborator Author

@basiav basiav Jul 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe, it may be expanded later with embeddings returning/nicer results interface, so you kind of sensed it right that it might be changed. But a list object for now. Is it ok? I was rather explaining why a list not np.empty like the rest.


for iter in tqdm(range(num_runs)):
elapsed = time()
result = self.searcher.hierarchical_community_search(**kwargs)
times[iter] = time() - elapsed

if METADATA_KEYARG in kwargs:
result, sampleset_metadata = result

try:
modularity_score = nx.community.modularity(
self.searcher.sampler.G,
Expand All @@ -91,18 +117,28 @@ def run(

communities[iter] = result
modularities[iter] = modularity_score
if return_metadata:
samplesets_data.append(sampleset_metadata)

if save_results:
np.save(f"{saving_path}_modularities", modularities)
np.save(f"{saving_path}_communities", communities)
if elapse_times:
np.save(f"{saving_path}_times", times)
if return_metadata:
# Pickle saving tends to be safer for big objects
with open(f"{saving_path}_samplesets_data.pkl", "wb") as f:
pickle.dump(samplesets_data, f)

if iterative_verbosity >= 1:
print(f"Iteration {iter} completed")

if elapse_times and return_metadata and sampleset_metadata:
return communities, modularities, times, sampleset_metadata
if elapse_times:
return communities, modularities, times
if return_metadata:
return communities, modularities, samplesets_data
return communities, modularities

def run_with_sampleset_info(
Expand All @@ -111,9 +147,16 @@ def run_with_sampleset_info(
save_results: bool = True,
saving_path: str | None = None,
iterative_verbosity: int = 0,
return_metadata: bool = True,
**kwargs,
):

if return_metadata and hasattr(self.sampler, "return_metadata") and not self.sampler.return_metadata:
raise MethodArgsWarning(
f"Set Advantage sampler's {METADATA_KEYARG} flag to True before running."
+ f" HierarchicalIterativeSearcher with {METADATA_KEYARG}."
)

if iterative_verbosity >= 1:
print("Starting community detection iterations")

Expand All @@ -125,21 +168,50 @@ def run_with_sampleset_info(
times = np.zeros((num_runs))
division_modularities = np.empty((num_runs), dtype=object)
division_trees = np.empty((num_runs), dtype=object)
samplesets_data = np.empty((num_runs), dtype=object)

if return_metadata and isinstance(self.sampler, AdvantageSampler):
kwargs[METADATA_KEYARG] = True
else:
return_metadata = False
kwargs[METADATA_KEYARG] = False

for iter in tqdm(range(num_runs)):
run_label = f"iter_{iter}"

elapsed = time()
(
communities_result,
div_tree,
div_modularities,
) = self.searcher.hierarchical_community_search(
result = self.searcher.hierarchical_community_search(
return_modularities=True,
division_tree=True,
saving_path=saving_path,
label=run_label,
**kwargs,
)

# Currently only AdvantageSampler among the hierarchical solvers
# provides sampleset metadata.
if (
isinstance(self.sampler, AdvantageSampler)
and self.sampler.return_metadata
and return_metadata
):
(
communities_result,
div_tree,
div_modularities,
sampleset_data,
) = result
else:
(
communities_result,
div_tree,
div_modularities,
) = result
times[iter] = time() - elapsed
division_trees[iter] = div_tree
division_modularities[iter] = div_modularities
if return_metadata:
samplesets_data[iter] = sampleset_data

try:
modularity_score = nx.community.modularity(
Expand All @@ -163,25 +235,38 @@ def run_with_sampleset_info(
f"{saving_path}_division_modularities",
division_modularities,
)
# Pickle saving tends to be safer for big objects
if return_metadata:
try:
sampleset_data.save_to_files(base_filename=f"{saving_path}_{run_label}")
except Exception as e:
print(f"Error while saving HierarchicalRunMetadata (sampleset_data) from iteration: {iter}", e)


if iterative_verbosity >= 1:
print(f"Iteration {iter} completed")

dtypes = [
("communities", object),
("modularity", np.float_),
("time", np.float_),
("modularity", np.float64),
("time", np.float64),
("division_tree", object),
("division_modularities", object),
]
sampleset_components = [
communities,
modularities,
times,
division_trees,
division_modularities,
]

if return_metadata:
dtypes.append(("samplesets_data", object))
sampleset_components.append(samplesets_data)

sampleset = np.rec.fromarrays(
[
communities,
modularities,
times,
division_trees,
division_modularities,
],
sampleset_components,
dtype=dtypes,
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,13 @@ def __init__(
resolution: float = 1,
community: list | None = None,
use_weights: bool = True,
version: str = "Advantage_system5.4",
region: str = "eu-central-1",
version: str | None = None,
region: str | None = None,
num_reads: int = 100,
chain_strength: float | None = None,
use_clique_embedding: bool = False,
elapse_times: bool = False,
return_metadata: bool = True,
) -> None:
if not community:
community = [*range(G.number_of_nodes())]
Expand All @@ -28,9 +30,22 @@ def __init__(
self.chain_strength = chain_strength
self.use_clique_embedding = use_clique_embedding
self._use_weights = use_weights
self.elapse_times = elapse_times
self.return_metadata = return_metadata

weight = "weight" if use_weights else None
network = Network(G, resolution=resolution, weight=weight, community=community)

if not hasattr(self, "_full_modularity_matrix"):
self._full_modularity_matrix = Network(
G, resolution=resolution, weight=weight, community=community
).calculate_full_modularity_matrix()
network = Network(
G,
resolution=resolution,
weight=weight,
community=community,
full_modularity_matrix=self._full_modularity_matrix,
)
problem = CommunityDetectionProblem(
network, communities=2, one_hot_encoding=False
)
Expand All @@ -41,18 +56,30 @@ def __init__(
num_reads=num_reads,
chain_strength=chain_strength,
use_clique_embedding=use_clique_embedding,
elapse_times=elapse_times,
)

def sample_qubo_to_dict(self) -> dict:
sample = self.advantage.solve()
def sample_qubo_to_dict(
    self,
    return_metadata: bool | None = None,
    label: str | None = None,
    saving_path: str | None = None,
) -> dict:
    """Solve the community-detection QUBO and map variables to assignments.

    Args:
        return_metadata: when truthy, request sampleset metadata from the
            solver and return it alongside the community dict. When falsy
            (including the default ``None``), a plain solve with a
            dict-only return — preserved for backward compatibility.
        label: optional run label forwarded to the solver.
        saving_path: optional saving path forwarded to the solver.

    Returns:
        dict mapping variable names ("x0", "x1", ...) to the binary
        community assignment of the top sample; if ``return_metadata`` is
        truthy, a ``(dict, sampleset_info)`` tuple instead.
    """
    if return_metadata:
        # Bug fix: forward the caller's flag rather than
        # self.return_metadata. Previously, a caller passing
        # return_metadata=True on a sampler constructed with
        # return_metadata=False would get no metadata from solve(),
        # yet this method would still try to return
        # sample.sampleset_info below — an inconsistent contract.
        sample = self.advantage.solve(
            return_metadata=return_metadata,
            label=label,
            saving_path=saving_path,
        )
    else:
        sample = self.advantage.solve()

    # Decision variables are named "x<i>"; sort numerically (not
    # lexicographically) so positions align with node indices.
    variables = sorted(
        [col for col in sample.probabilities.dtype.names if col.startswith("x")],
        key=lambda x: int(x[1:]),
    )
    # Take the first (highest-probability) sample's assignment.
    community = sample.probabilities[variables][0]

    result = dict(zip(variables, community))

    if return_metadata:
        return result, sample.sampleset_info
    return result

def update_community(self, community: list) -> None:
self.__init__(
Expand All @@ -65,4 +92,9 @@ def update_community(self, community: list) -> None:
self.num_reads,
self.chain_strength,
self.use_clique_embedding,
self.elapse_times,
self.return_metadata,
)

def __str__(self):
    """Human-readable identifier: '<version> <region>' of the wrapped solver."""
    return " ".join((self.advantage.version, self.advantage.region))
2 changes: 1 addition & 1 deletion Qommunity/samplers/regular/dqm_sampler/dqm_sampler.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from QHyper.solvers.quantum_annealing.dqm import DQM
from QHyper.solvers.quantum_annealing.dwave.dqm import DQM
from QHyper.problems.community_detection import Network, CommunityDetectionProblem
import networkx as nx
from ..regular_sampler import RegularSampler
Expand Down
Loading