Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 93 additions & 0 deletions score/evaluation/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@

load("@rules_cc//cc:defs.bzl", "cc_binary")

# Convenience aliases so each evaluation target is also addressable as
# //score/evaluation:<name> in addition to its package-qualified label.

alias(
    name = "solid_violations",
    actual = "//score/evaluation/solid:solid_violations",
)

alias(
    name = "memory_issues",
    actual = "//score/evaluation/memory:memory_issues",
)

alias(
    name = "non_idiomatic_cpp",
    actual = "//score/evaluation/idiomatic:non_idiomatic_cpp",
)

alias(
    name = "modern_cpp_syntax",
    actual = "//score/evaluation/modern_cpp:modern_cpp_syntax",
)

alias(
    name = "design_patterns",
    actual = "//score/evaluation/patterns:design_patterns",
)

alias(
    name = "code_optimization",
    actual = "//score/evaluation/optimization:code_optimization",
)

alias(
    name = "safe_cpp",
    actual = "//score/evaluation/safety:safe_cpp",
)

alias(
    name = "security_issues",
    actual = "//score/evaluation/security:security_issues",
)

alias(
    name = "design_faults",
    actual = "//score/evaluation/design:design_faults",
)

alias(
    name = "concurrency_issues",
    actual = "//score/evaluation/concurrency:concurrency_issues",
)

alias(
    name = "exception_error_handling",
    actual = "//score/evaluation/exception_handling:exception_error_handling",
)

alias(
    name = "unit_test_quality",
    actual = "//score/evaluation/unit_test_quality:unit_test_quality",
)

alias(
    name = "template_metaprogramming_issues",
    actual = "//score/evaluation/template_metaprogramming:template_metaprogramming_issues",
)

alias(
    name = "unit_test_quality_test",
    actual = "//score/evaluation/unit_test_quality:unit_test_quality_test",
)

# Aggregate binary that links every evaluation library.
# deps are kept in lexicographic order (buildifier convention);
# "safety" sorts before "security".
cc_binary(
    name = "evaluation_all",
    srcs = ["evaluation_main.cpp"],
    visibility = ["//visibility:public"],
    deps = [
        "//score/evaluation/concurrency:concurrency_issues",
        "//score/evaluation/design:design_faults",
        "//score/evaluation/exception_handling:exception_error_handling",
        "//score/evaluation/idiomatic:non_idiomatic_cpp",
        "//score/evaluation/memory:memory_issues",
        "//score/evaluation/modern_cpp:modern_cpp_syntax",
        "//score/evaluation/optimization:code_optimization",
        "//score/evaluation/patterns:design_patterns",
        "//score/evaluation/safety:safe_cpp",
        "//score/evaluation/security:security_issues",
        "//score/evaluation/solid:solid_violations",
        "//score/evaluation/template_metaprogramming:template_metaprogramming_issues",
        "//score/evaluation/unit_test_quality:unit_test_quality",
    ],
)
8 changes: 8 additions & 0 deletions score/evaluation/concurrency/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@

load("@rules_cc//cc:defs.bzl", "cc_library")

# Header-only library with the concurrency examples consumed by
# //score/evaluation:evaluation_all.
cc_library(
    name = "concurrency_issues",
    hdrs = ["concurrency_issues.h"],
    visibility = ["//visibility:public"],
)
148 changes: 148 additions & 0 deletions score/evaluation/concurrency/concurrency_issues.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@


#ifndef SCORE_EVALUATION_CONCURRENCY_ISSUES_H
#define SCORE_EVALUATION_CONCURRENCY_ISSUES_H

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <string>
#include <thread>

namespace score
{
namespace evaluation
{

/// Thread-safe counter.
///
/// count_ is std::atomic<int> so the read-modify-write in Increment()
/// is a single atomic operation; with a plain int, concurrent callers
/// would race on ++count_ (undefined behavior, lost updates).
class UnsafeCounter
{
public:
    /// Atomically increments the counter.
    void Increment() { ++count_; }

    /// Returns the current value (atomic load).
    int Get() const { return count_; }

private:
    std::atomic<int> count_{0};
};

/// Serialises writes behind a mutex.
///
/// Write() uses std::lock_guard (RAII) instead of manual lock()/unlock():
/// if ProcessData() throws, the guard's destructor still releases the
/// mutex, so the lock can never be leaked and block other threads.
class ManualLockUser
{
public:
    /// Processes data while holding mtx_; exception-safe unlock.
    void Write(const std::string& data)
    {
        std::lock_guard<std::mutex> lock(mtx_);  // released on every exit path
        ProcessData(data);
    }

private:
    // Placeholder for the real processing step; intentionally a no-op here.
    static void ProcessData(const std::string& /*data*/)
    {
    }

    std::mutex mtx_;
};

/// Lazily initialises a resource via double-checked locking.
///
/// initialised_ is std::atomic<bool>: the acquire load on the fast path
/// pairs with the release store after initialisation, so a thread that
/// observes `true` also observes the fully written resource_. With a
/// plain bool this pattern is a data race (stale reads, torn publication).
class BrokenLazyInit
{
public:
    void EnsureInitialised()
    {
        // Fast path: acquire pairs with the release store below.
        if (!initialised_.load(std::memory_order_acquire))
        {
            std::lock_guard<std::mutex> lk(mtx_);
            // Re-check under the lock so exactly one thread initialises.
            if (!initialised_.load(std::memory_order_relaxed))
            {
                resource_ = 42;
                initialised_.store(true, std::memory_order_release);
            }
        }
    }

    /// Valid only after EnsureInitialised() has completed.
    int GetResource() const { return resource_; }

private:
    std::atomic<bool> initialised_{false};
    int resource_{0};
    std::mutex mtx_;
};

/// Transfers between two balances guarded by separate mutexes.
///
/// Both transfer directions acquire mtx_a_ and mtx_b_ via std::lock on
/// deferred unique_locks; std::lock uses a deadlock-avoidance algorithm,
/// so concurrent TransferAB/TransferBA calls cannot deadlock the way two
/// plain lock_guards taken in opposite orders would.
class DeadlockProne
{
public:
    void TransferAB(int amount)
    {
        std::unique_lock<std::mutex> lk_a(mtx_a_, std::defer_lock);
        std::unique_lock<std::mutex> lk_b(mtx_b_, std::defer_lock);
        std::lock(lk_a, lk_b);  // acquires both without deadlock
        balance_a_ -= amount;
        balance_b_ += amount;
    }

    void TransferBA(int amount)
    {
        std::unique_lock<std::mutex> lk_a(mtx_a_, std::defer_lock);
        std::unique_lock<std::mutex> lk_b(mtx_b_, std::defer_lock);
        std::lock(lk_a, lk_b);  // same acquisition discipline as TransferAB
        balance_b_ -= amount;
        balance_a_ += amount;
    }

private:
    std::mutex mtx_a_;
    std::mutex mtx_b_;
    int balance_a_{1000};
    int balance_b_{1000};
};

/// Condition-variable rendezvous: Wait() blocks until Signal() sets ready_.
///
/// Wait() uses the predicate overload of condition_variable::wait, which
/// re-checks ready_ in a loop. Condition variables may wake spuriously,
/// so a single `if (!ready_) cv_.wait(lk);` could return while ready_ is
/// still false.
class SpuriousWakeupBug
{
public:
    void Wait()
    {
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this]() { return ready_; });  // robust to spurious wakeups
    }

    void Signal()
    {
        std::lock_guard<std::mutex> lk(mtx_);
        ready_ = true;
        cv_.notify_one();
    }

private:
    std::mutex mtx_;
    std::condition_variable cv_;
    bool ready_{false};
};

/// Launches a fire-and-forget worker thread.
///
/// local_config is captured BY VALUE: the detached thread can outlive this
/// function's stack frame, so a reference capture would dangle once the
/// function returns (use-after-free in the worker).
inline void StartDetachedThread()
{
    std::string local_config = "config_data";

    // Value capture moves lifetime management into the closure itself.
    std::thread t([local_config]()
    {
        (void)local_config;
    });
    t.detach();
}

/// Counter with a race-free conditional consume.
///
/// ConsumeIfAvailable() performs the "count > 0" check and the decrement
/// as one atomic step via compare_exchange_weak, so the count can never
/// go negative. A separate load() followed by --count_ is a
/// check-then-act (TOCTOU) race: another thread can consume between the
/// two operations.
class TocTouAtomic
{
public:
    void ConsumeIfAvailable()
    {
        int expected = count_.load(std::memory_order_relaxed);
        while (expected > 0 &&
               !count_.compare_exchange_weak(expected, expected - 1,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed))
        {
            // A failed CAS refreshed `expected`; retry while still positive.
        }
    }

    /// Atomically adds one unit.
    void Produce() { ++count_; }

private:
    std::atomic<int> count_{0};
};

}
}

#endif // SCORE_EVALUATION_CONCURRENCY_ISSUES_H
8 changes: 8 additions & 0 deletions score/evaluation/design/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@

load("@rules_cc//cc:defs.bzl", "cc_library")

# Header-only library with the design examples consumed by
# //score/evaluation:evaluation_all.
cc_library(
    name = "design_faults",
    hdrs = ["design_faults.h"],
    visibility = ["//visibility:public"],
)
Loading
Loading