Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
.idea/
test-out/
/testconf/
/authaus.out
/log/
/testconf/
test-out/
/out
/test.log
/test-2025-08-07T06-17-16.613.log
/test-2025-08-07T06-18-32.789.log
Comment on lines +7 to +9
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should never have any *.log files in this repo. Please rather change this to

Suggested change
/test.log
/test-2025-08-07T06-17-16.613.log
/test-2025-08-07T06-18-32.789.log
*.log

/TestAuthRoleDB_MissingGroups_in_github_com_IMQS_authaus.test.exe
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should never have any *.exe files in this repo. Please rather change this to

Suggested change
/TestAuthRoleDB_MissingGroups_in_github_com_IMQS_authaus.test.exe
*.exe

4 changes: 3 additions & 1 deletion all_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ func getCentral(t *testing.T) *Central {
userStore = newDummyUserStore()
}

c := NewCentral(log.Stdout, nil, nil, userStore, permitDB, sessionDB, roleDB)
c := NewCentral(log.Stdout, nil, nil, userStore, permitDB, sessionDB, roleDB, nil)
c.Auditor = &dummyAuditor{}
if isBackendPostgresTest() {
c.DB = sqlDB
Expand Down Expand Up @@ -371,6 +371,8 @@ func sqlDeleteAllTables(db *sql.DB) error {
"DROP TABLE IF EXISTS oauthchallenge CASCADE",
"DROP TABLE IF EXISTS oauthsession CASCADE",
"DROP TABLE IF EXISTS migration_version CASCADE",
"DROP TABLE IF EXISTS authuserstats CASCADE",
"DROP TABLE IF EXISTS session_check_logs CASCADE",
}
for _, st := range statements {
if _, err := db.Exec(st); err != nil {
Expand Down
29 changes: 20 additions & 9 deletions auth.go
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,7 @@ type Central struct {
permitDB PermitDB
sessionDB SessionDB
roleGroupDB RoleGroupDB
UsageTracker *CheckUsageTracker
renameLock sync.Mutex
loginDelayMS uint64 // Number of milliseconds by which we increase the delay login every time the user enters invalid credentials
shuttingDown uint32
Expand All @@ -326,7 +327,7 @@ type Central struct {

// Create a new Central object from the specified pieces.
// roleGroupDB may be nil
func NewCentral(logfile string, ldap LDAP, msaad MSAADInterface, userStore UserStore, permitDB PermitDB, sessionDB SessionDB, roleGroupDB RoleGroupDB) *Central {
func NewCentral(logfile string, ldap LDAP, msaad MSAADInterface, userStore UserStore, permitDB PermitDB, sessionDB SessionDB, roleGroupDB RoleGroupDB, usageTracker *CheckUsageTracker) *Central {
c := &Central{}

c.OAuth.Initialize(c)
Expand Down Expand Up @@ -361,6 +362,10 @@ func NewCentral(logfile string, ldap LDAP, msaad MSAADInterface, userStore UserS
}
}

if usageTracker != nil {
c.UsageTracker = usageTracker
c.UsageTracker.Initialize(c.Log)
}
c.Log.Infof("Authaus successfully started up\n")

return c
Expand All @@ -369,13 +374,14 @@ func NewCentral(logfile string, ldap LDAP, msaad MSAADInterface, userStore UserS
// Create a new 'Central' object from a Config.
func NewCentralFromConfig(config *Config) (central *Central, err error) {
var (
db *sql.DB
ldap LDAP
msaad MSAADInterface
userStore UserStore
permitDB PermitDB
sessionDB SessionDB
roleGroupDB RoleGroupDB
db *sql.DB
ldap LDAP
msaad MSAADInterface
userStore UserStore
permitDB PermitDB
sessionDB SessionDB
roleGroupDB RoleGroupDB
checkUsageTracker *CheckUsageTracker
)
msaadUsed := config.MSAAD.ClientID != ""
ldapUsed := len(config.LDAP.LdapHost) > 0
Expand Down Expand Up @@ -460,8 +466,9 @@ func NewCentralFromConfig(config *Config) (central *Central, err error) {
oldPasswordHistorySize = defaultOldPasswordHistorySize
}
userStore.SetConfig(time.Duration(config.UserStore.PasswordExpirySeconds)*time.Second, oldPasswordHistorySize, config.UserStore.UsersExemptFromExpiring)
checkUsageTracker = NewCheckUsageTracker(config.UsageTracking, nil, db)

c := NewCentral(config.Log.Filename, ldap, msaad, userStore, permitDB, sessionDB, roleGroupDB)
c := NewCentral(config.Log.Filename, ldap, msaad, userStore, permitDB, sessionDB, roleGroupDB, checkUsageTracker)
c.DB = db
c.MaxActiveSessions = config.SessionDB.MaxActiveSessions
if config.SessionDB.SessionExpirySeconds != 0 {
Expand Down Expand Up @@ -1197,6 +1204,10 @@ func (x *Central) Close() {
x.roleGroupDB.Close()
x.roleGroupDB = nil
}
if x.UsageTracker != nil {
x.UsageTracker.Stop()
x.UsageTracker = nil
}
if x.DB != nil {
x.DB.Close()
}
Expand Down
18 changes: 18 additions & 0 deletions config.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,23 @@ type ConfigUserStoreDB struct {
UsersExemptFromExpiring []string // List of users that are not subject to password expiry. Username will be used for comparison.
}

// ConfigUsageTracking controls session check usage tracking
type ConfigUsageTracking struct {
Enabled bool `json:"enabled"` // Enable/disable usage tracking
FlushIntervalSeconds int `json:"flush_interval_seconds"` // Flush interval in seconds (default: 60)
MaxEntries int `json:"max_entries"` // Maximum number of entries to keep in memory before flushing (default: 1000)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
MaxEntries int `json:"max_entries"` // Maximum number of entries to keep in memory before flushing (default: 1000)
MaxEntries int `json:"max_entries"` // Maximum number of entries to keep in memory before flushing (default: 10 000)

Test_MemDump bool `json:"test___mem_dump"` // If true, dump memory usage statistics to log on flush
}

// SetDefaults fills in sane values for any unset (non-positive) fields.
func (x *ConfigUsageTracking) SetDefaults() {
	// A non-positive interval means "not configured"; fall back to one minute.
	if x.FlushIntervalSeconds < 1 {
		x.FlushIntervalSeconds = 60
	}
	// Likewise cap the in-memory buffer at 10000 entries by default.
	if x.MaxEntries < 1 {
		x.MaxEntries = 10000
	}
}

/*
Configuration information. This is typically loaded from a .json config file.
*/
Expand All @@ -178,6 +195,7 @@ type Config struct {
UserStore ConfigUserStoreDB
OAuth ConfigOAuth
MSAAD ConfigMSAAD
UsageTracking ConfigUsageTracking
AuditServiceUrl string
EnableAccountLocking bool
MaxFailedLoginAttempts int
Expand Down
2 changes: 2 additions & 0 deletions docs/changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

## Current

* feat : Auth-check logger (ASG-4837)

## v1.3.8

* fix : Add fetch-all for UserStats to combat performance issues
Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/wI2L/jsondiff v0.6.1
golang.org/x/crypto v0.31.0
golang.org/x/text v0.21.0
)

require (
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ github.com/wI2L/jsondiff v0.6.1 h1:ISZb9oNWbP64LHnu4AUhsMF5W0FIj5Ok3Krip9Shqpw=
github.com/wI2L/jsondiff v0.6.1/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
Expand Down
3 changes: 3 additions & 0 deletions migrations.go
Original file line number Diff line number Diff line change
Expand Up @@ -282,6 +282,9 @@ func createMigrations() []migration.Migrator {

// 16. Add authuserstats table that will record user_id, enabled_date, disabled_date, last_login_date
simple(`CREATE TABLE authuserstats (user_id BIGINT PRIMARY KEY, enabled_date TIMESTAMP, disabled_date TIMESTAMP, last_login_date TIMESTAMP);`),

// 17. Add: session_check_logs
simple(`CREATE TABLE session_check_logs (id bigserial PRIMARY KEY, ts TIMESTAMP WITH TIME ZONE NOT NULL, session_token VARCHAR NOT NULL, user_id BIGINT NOT NULL);`),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@bosvos your prompt to Copilot did not mention how these logs are going to be read back, which makes this change to this library feel a bit half-baked.

From my point of view. I can imagine three functions/use cases

func (t *CheckUsageTracker) FetchLogs(start time.Time, end time.Time, limit int64, offset int64) ([]*CheckLogEntry, error) { ... }

func (t *CheckUsageTracker) FetchLogsSession(start time.Time, end time.Time, limit int64, offset int64, session string) ([]*CheckLogEntry, error) { ... }

func (t *CheckUsageTracker) FetchLogsUser(start time.Time, end time.Time, limit int64, offset int64, userID int64) ([]*CheckLogEntry, error) { ... }

Based on those three functions we would need the following indexes added to this nr 17 db migration.

Suggested change
simple(`CREATE TABLE session_check_logs (id bigserial PRIMARY KEY, ts TIMESTAMP WITH TIME ZONE NOT NULL, session_token VARCHAR NOT NULL, user_id BIGINT NOT NULL);`),
simple(`CREATE TABLE session_check_logs (id bigserial PRIMARY KEY, ts TIMESTAMP WITH TIME ZONE NOT NULL, session_token VARCHAR NOT NULL, user_id BIGINT NOT NULL);
CREATE INDEX idx_session_check_logs_ts ON session_check_logs (ts);
CREATE INDEX idx_session_check_logs_ts_session ON session_check_logs (ts, session_token);
CREATE INDEX idx_session_check_logs_ts_user_id ON session_check_logs (ts, user_id);

BUT this would make the table quite index heavy... and while we should not need to worry about that anymore (after Copilot implements my changes to refactor the code to make use of a channel instead of an array for the log entries buffer, it will be very difficult to have the code block execution of the main thread), it still feels like this is going to become slower/more expensive as time goes on without making use of table partitioning or pruning of this table.

Either way, I think all we really need is the first index I recommended on ts, as that should filter out enough data to make the rest of the filtering very quick, even if needs to brute force the rest.

}
}

Expand Down
2 changes: 1 addition & 1 deletion msaad_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ func getCentralMSAAD(t *testing.T) *Central {
p.Roles = EncodePermit(groupIds)
permitDB.SetPermit(user.UserId, &p)

c := NewCentral(log.Stdout, nil, &msaad, userStore, permitDB, sessionDB, roleDB)
c := NewCentral(log.Stdout, nil, &msaad, userStore, permitDB, sessionDB, roleDB, nil)
da := &dummyAuditor{}
da.messages = []string{}
da.testing = t
Expand Down
204 changes: 204 additions & 0 deletions permitchecklog.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,204 @@
package authaus

import (
"database/sql"
"fmt"
"github.com/IMQS/log"
"sync"
"time"
)

// CheckLogEntry represents a single session check log entry, recording that
// a session token was validated at a point in time.
type CheckLogEntry struct {
	Timestamp    time.Time `json:"timestamp"`     // UTC time at which the check occurred (set via time.Now().UTC())
	SessionToken string    `json:"session_token"` // the session token that was checked
	UserId       UserId    `json:"user_id"`       // ID of the user that owns the session
}

// CheckUsageTracker manages in-memory storage and periodic flushing of session check logs
type CheckUsageTracker struct {
config ConfigUsageTracking
log *log.Logger
logs []CheckLogEntry
mutex sync.RWMutex
stopChan chan struct{}
doneChan chan struct{}
flushDoneChan chan struct{}
stopOnce sync.Once
flushTicker *time.Ticker
flushing bool // Track if a flush is in progress
db *sql.DB
droppedLogsCount int64
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe add droppedLogsCount to the database records, since there is no other exposure to it; it is only used in testing?

memDump []string // For testing purposes
}

// NewCheckUsageTracker creates a new usage tracker instance.
// The logger may be nil; Initialize supplies one later. db may also be nil,
// in which case entries are never persisted (only Test_MemDump capture works).
func NewCheckUsageTracker(c ConfigUsageTracking, logger *log.Logger, db *sql.DB) *CheckUsageTracker {
	c.SetDefaults()
	t := &CheckUsageTracker{
		config:        c,
		log:           logger,
		db:            db,
		logs:          []CheckLogEntry{},
		stopChan:      make(chan struct{}),
		doneChan:      make(chan struct{}),
		flushDoneChan: make(chan struct{}, 1),
	}
	// The in-memory dump is only ever read by tests, so only allocate it
	// when the test flag is set.
	if c.Test_MemDump {
		t.memDump = []string{}
	}
	return t
}

func (t *CheckUsageTracker) Initialize(logger *log.Logger) {
if logger != nil {
t.log = logger

if t.config.Enabled {
t.start()
}

if t.db == nil {
t.log.Warn("CheckUsageTracker initialized without a database connection, logs will not be persisted")
} else {
t.log.Info("CheckUsageTracker initialized with database connection")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My gut is telling me this should rather be a Debug. No need to see the expected result every time.

}
}
}

// LogCheck adds a check request to the in-memory log
func (t *CheckUsageTracker) LogCheck(sessionToken string, token *Token) {
if !t.config.Enabled {
return
}

entry := CheckLogEntry{
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. We should be handling the pointer to the struct, instead of the struct it self.

Suggested change
entry := CheckLogEntry{
entry := &CheckLogEntry{

Timestamp: time.Now().UTC(),
SessionToken: sessionToken,
UserId: token.UserId,
}

t.mutex.Lock()
if len(t.logs) >= t.config.MaxEntries {
t.droppedLogsCount++
// drop entry
} else {
t.logs = append(t.logs, entry)
}
t.mutex.Unlock()
Comment on lines +81 to +88
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. This type of logging should be sent on a channel, appending to an array is going to require a lot of unnecessary code/logic.

}

// start begins the periodic flush process
func (t *CheckUsageTracker) start() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. This function should be creating a log channel of type *CheckLogEntry and size int(math.Max(float64(t.config.MaxEntries), float64(1000))).

flushInterval := time.Duration(t.config.FlushIntervalSeconds) * time.Second
if flushInterval <= 0 {
flushInterval = 60 * time.Second // Default to 1 minute
}

t.flushTicker = time.NewTicker(flushInterval)

go func() {
defer close(t.doneChan)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. Make use of a sync.WaitGroup to allow the main thread to wait on this go routine instead of reinventing the wheel with a channel.

for {
select {
case <-t.flushTicker.C:
t.flush()
case <-t.stopChan:
t.flushTicker.Stop()
t.flush() // Final flush before stopping
return
}
}
Comment on lines +102 to +111
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot.

  • There needs to be an array of *CheckLogEntry in this go routine that is pre-allocated with a capacity of t.config.MaxEntries.
  • In the select we need to be a wait on the log channel that returns a *CheckLogEntry that needs to be appended to the array of *CheckLogEntry, then the length of the array needs to be compared with t.config.MaxEntries. If equal or more we call t.persistLogs with the array and we reslice the array to zero length.
  • The case <-t.flushTicker.C: should be calling t.persistLogs and reslice the array to zero length. Please remove the call to the flush function.
  • The case <-t.stopChan: should replace the call to flush with t.persistLogs
Suggested change
for {
select {
case <-t.flushTicker.C:
t.flush()
case <-t.stopChan:
t.flushTicker.Stop()
t.flush() // Final flush before stopping
return
}
}
logs := make([]*CheckLogEntry, 0, t.config.MaxEntries)
for {
select {
case entry := <-t.logChan:
logs = append(logs, entry)
if len(logs) >= t.config.MaxEntries {
err := t.persistLogs(logs)
if err != nil {
t.log.Errorf("Failed to persist auth check logs: %v", err)
return
}
logs = logs[:0]
}
case <-t.flushTicker.C:
err := t.persistLogs(logs)
if err != nil {
t.log.Errorf("Failed to persist auth check logs: %v", err)
return
}
logs = logs[:0]
case <-t.stopChan:
t.flushTicker.Stop()
err := t.persistLogs(logs) // Final flush before stopping
if err != nil {
t.log.Errorf("Failed to persist auth check logs: %v", err)
return
}
return
}
}

@bosvos so when we check the error of persistLogs, we MUST bring the service down, as the service failed to do something that it was explicitly configured to do... but I don't really know how to tell Copilot this. We could replace the returns that I placed in there with panics, but I am used to having an errChan or a error variable that the main thread is checking, logs and kills the service if it ever gets a value.

For this library those would have to be global variables, or passed to every "child" of Central, but I don't think this library has any of those. I am kinda leaning towards just telling Copilot to call panic... it feels the most straightforward.

}()
}

// Stop gracefully shuts down the usage tracker
func (t *CheckUsageTracker) Stop() {
t.stopOnce.Do(func() {
if t.flushTicker != nil && t.stopChan != nil {
close(t.stopChan)
<-t.doneChan // Wait for main flush management goroutine to finish
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. Make use of a sync.WaitGroup instead of reinventing the wheel with a channel.

<-t.flushDoneChan // Wait for any ongoing flush operation to complete
}
})
}

// flush writes the in-memory logs to persistent storage and clears the memory
// TODO : Implement a more generic persistence abstraction (e.g. Provider pattern)
func (t *CheckUsageTracker) flush() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bad bot. This entire function is the result of using an array instead of a channel to manage the *CheckLogEntrys. Please remove all of this unnecessary complexity (delete the function).

t.mutex.Lock()

if len(t.logs) == 0 || t.flushing {
t.mutex.Unlock()
return
}

// Create a copy of logs to persist
logsToPersist := make([]CheckLogEntry, len(t.logs))
copy(logsToPersist, t.logs)
logsCount := len(t.logs)

// Mark that we're flushing to prevent concurrent flushes
t.flushing = true
t.mutex.Unlock()

// Persist logs in a separate goroutine to avoid blocking
go func() {
err := t.persistLogs(logsToPersist)

// Handle the result of persistence
t.mutex.Lock()
t.flushing = false

if err != nil {
t.log.Errorf("Failed to persist check usage logs: %v", err)
// Keep the logs in memory for retry - don't clear them
} else {
// Only clear logs after successful persistence
// Check if new logs were added during persistence
if len(t.logs) >= logsCount {
// Remove the persisted logs (first logsCount entries)
t.logs = t.logs[logsCount:]
} else {
// Shouldn't happen, but clear all if somehow we have fewer logs
t.logs = t.logs[:0]
}
Copy link

Copilot AI Aug 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The condition len(t.logs) >= logsCount assumes logs are only appended during persistence. If logs are added faster than they can be persisted, this could incorrectly remove newer logs. Consider using a more precise approach like tracking which specific logs were persisted.

Suggested change
}
// Remove only the logs that were actually persisted (matching prefix)
removeCount := 0
for removeCount < len(t.logs) && removeCount < len(logsToPersist) {
if t.logs[removeCount] != logsToPersist[removeCount] {
break
}
removeCount++
}
t.logs = t.logs[removeCount:]

Copilot uses AI. Check for mistakes.
Copy link
Contributor Author

@bosvos bosvos Aug 11, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@copilot I don't understand what you mean by "assumes logs are only appended during persistence"...which is the only portion we care about when dealing with the clearing of the logs...after clearing "up to" the persistence count, the persistence is done and t.logs carries on being populated as normal - the goroutine would have exited.

}
t.mutex.Unlock()
t.flushDoneChan <- struct{}{}
}()
}

// persistLogs writes logs to persistent storage using authaus persistence abstraction.
//
// When a database is configured the whole batch is written inside a single
// transaction, so an error partway through never leaves a partially persisted
// batch behind. This matters because the caller keeps the batch in memory and
// retries it after a failure — without atomicity a retry would duplicate the
// rows that did make it in before the error.
//
// With no database connection, entries are optionally captured in memDump
// (testing only, gated by Test_MemDump).
// TODO : Implement a more generic persistence abstraction (e.g. Provider pattern)
func (t *CheckUsageTracker) persistLogs(logs []CheckLogEntry) error {
	if len(logs) == 0 {
		return nil
	}
	if t.db == nil {
		// this is for testing, NOT production
		if t.config.Test_MemDump {
			for _, entry := range logs {
				t.memDump = append(t.memDump, checkLogString(entry))
			}
		}
		return nil
	}
	tx, err := t.db.Begin()
	if err != nil {
		return err
	}
	// Rollback is a no-op once Commit has succeeded.
	defer tx.Rollback()
	stmt, err := tx.Prepare("INSERT INTO session_check_logs (ts, session_token, user_id) VALUES ($1, $2, $3)")
	if err != nil {
		return err
	}
	defer stmt.Close()
	for _, entry := range logs {
		if _, err := stmt.Exec(entry.Timestamp, entry.SessionToken, entry.UserId); err != nil {
			return err
		}
	}
	return tx.Commit()
}

// checkLogString renders a CheckLogEntry as a single human-readable line;
// used when dumping entries to memDump during tests.
func checkLogString(entry CheckLogEntry) string {
	ts := entry.Timestamp.Format(time.RFC3339)
	return "Persisting CheckUsage: time=" + ts + " token=" + entry.SessionToken + " userId=" + fmt.Sprint(entry.UserId)
}
Loading