Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .evergreen/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -1273,7 +1273,7 @@ buildvariants:
- matrix_name: CSOT
matrix_spec:
ruby: "ruby-4.0"
mongodb-version: "8.0"
mongodb-version: ["8.0", "rapid"]
topology: replica-set-single-node
os: ubuntu2204
display_name: "CSOT - ${mongodb-version}"
Expand Down Expand Up @@ -1532,7 +1532,7 @@ buildvariants:
auth-and-ssl: "noauth-and-nossl"
ruby: ["ruby-4.0", "ruby-3.4", "ruby-3.3", "ruby-3.2", "ruby-3.1"]
topology: [replica-set, sharded-cluster]
mongodb-version: [ '6.0', '7.0', '8.0' ]
mongodb-version: [ '6.0', '7.0', '8.0', 'rapid' ]
os: ubuntu2204
fle: helper
display_name: "FLE: ${mongodb-version} ${topology} ${ruby}"
Expand Down
4 changes: 2 additions & 2 deletions .evergreen/config/standard.yml.erb
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ buildvariants:
- matrix_name: CSOT
matrix_spec:
ruby: <%= latest_ruby %>
mongodb-version: <%= latest_stable_mdb %>
mongodb-version: <%= stable_and_rapid %>
topology: replica-set-single-node
os: ubuntu2204
display_name: "CSOT - ${mongodb-version}"
Expand Down Expand Up @@ -350,7 +350,7 @@ buildvariants:
auth-and-ssl: "noauth-and-nossl"
ruby: <%= supported_mri_rubies_3_ubuntu %>
topology: [replica-set, sharded-cluster]
mongodb-version: [ '6.0', '7.0', '8.0' ]
mongodb-version: [ '6.0', '7.0', '8.0', 'rapid' ]
os: ubuntu2204
fle: helper
display_name: "FLE: ${mongodb-version} ${topology} ${ruby}"
Expand Down
116 changes: 116 additions & 0 deletions spec/integration/sdam_prose_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -64,4 +64,120 @@
configureFailPoint: 'failCommand', mode: 'off')
end
end

describe 'Connection Pool Backpressure' do
min_server_fcv '8.2'
require_topology :single

let(:subscriber) { Mrss::EventSubscriber.new }

let(:client) do
new_local_client(
SpecConfig.instance.addresses,
SpecConfig.instance.all_test_options.merge(
max_connecting: 100,
max_pool_size: 100,
),
).tap do |client|
client.subscribe(Mongo::Monitoring::CONNECTION_POOL, subscriber)
end
end

after do
sleep 1
admin_db = root_authorized_client.use('admin').database

if defined?(@prev_ingressConnectionEstablishmentRateLimiterEnabled) &&
defined?(@prev_ingressConnectionEstablishmentRatePerSec) &&
defined?(@prev_ingressConnectionEstablishmentBurstCapacitySecs) &&
defined?(@prev_ingressConnectionEstablishmentMaxQueueDepth)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentRateLimiterEnabled: @prev_ingressConnectionEstablishmentRateLimiterEnabled,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentRatePerSec: @prev_ingressConnectionEstablishmentRatePerSec,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentBurstCapacitySecs: @prev_ingressConnectionEstablishmentBurstCapacitySecs,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentMaxQueueDepth: @prev_ingressConnectionEstablishmentMaxQueueDepth,
)
else
# Fallback: at least disable the limiter if previous values were not captured.
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentRateLimiterEnabled: false,
)
end
end

it 'generates checkout failures when the ingress connection rate limiter is active' do
admin_db = root_authorized_client.use('admin').database

# Capture current ingress connection establishment parameters so they can be restored.
current_params = admin_db.command(
getParameter: 1,
ingressConnectionEstablishmentRateLimiterEnabled: 1,
ingressConnectionEstablishmentRatePerSec: 1,
ingressConnectionEstablishmentBurstCapacitySecs: 1,
ingressConnectionEstablishmentMaxQueueDepth: 1,
).first

@prev_ingressConnectionEstablishmentRateLimiterEnabled =
current_params['ingressConnectionEstablishmentRateLimiterEnabled']
@prev_ingressConnectionEstablishmentRatePerSec =
current_params['ingressConnectionEstablishmentRatePerSec']
@prev_ingressConnectionEstablishmentBurstCapacitySecs =
current_params['ingressConnectionEstablishmentBurstCapacitySecs']
@prev_ingressConnectionEstablishmentMaxQueueDepth =
current_params['ingressConnectionEstablishmentMaxQueueDepth']

# Enable the ingress rate limiter with test-specific values.
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentRateLimiterEnabled: true,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentRatePerSec: 20,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentBurstCapacitySecs: 1,
)
admin_db.command(
setParameter: 1,
ingressConnectionEstablishmentMaxQueueDepth: 1,
)

# Add a document so $where has something to process.
client.use('test')['test'].delete_many
client.use('test')['test'].insert_one({})

# Run 100 parallel find_one operations that contend for connections.
threads = 100.times.map do
Thread.new do
begin
client.use('test')['test'].find(
'$where' => 'function() { sleep(2000); return true; }'
).first
rescue StandardError
# Ignore connection errors (including checkout timeouts).
end
end
end
threads.each(&:join)

checkout_failed = subscriber.select_published_events(
Mongo::Monitoring::Event::Cmap::ConnectionCheckOutFailed
)

expect(checkout_failed.length).to be >= 10
end
end
end
4 changes: 2 additions & 2 deletions spec/integration/secondary_reads_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@

end_stats = get_read_counters

end_stats[:secondary].should be_within(10).of(start_stats[:secondary])
end_stats[:secondary].should be_within(50).of(start_stats[:secondary])
end_stats[:primary].should >= start_stats[:primary] + 30
end
end
Expand All @@ -50,7 +50,7 @@

end_stats = get_read_counters

end_stats[:primary].should be_within(10).of(start_stats[:primary])
end_stats[:primary].should be_within(50).of(start_stats[:primary])
end_stats[:secondary].should >= start_stats[:secondary] + 30
end
end
Expand Down
11 changes: 10 additions & 1 deletion spec/mongo/retryable/token_bucket_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,16 @@
end

describe 'thread safety' do
let(:bucket) { described_class.new(capacity: 1000) }
# Use capacity 2000, start at 1000 tokens.
# With 500 consumes and 500 deposits, floor/ceiling cannot be hit:
# min possible = 1000 - 500 = 500 > 0 (all consumes succeed)
# max possible = 1000 + 500 = 1500 < 2000 (all deposits effective)
# So the net change is guaranteed to be 0, making the assertion reliable.
let(:bucket) do
b = described_class.new(capacity: 2000)
b.consume(1000)
b
end

def run_concurrent_operations(bucket)
threads = []
Expand Down
5 changes: 3 additions & 2 deletions spec/spec_tests/data/sdam_unified/minPoolSize-error.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ initialData: &initialData
documents: []

tests:
- description: Network error on minPoolSize background creation
- description: Server error on minPoolSize background creation
operations:
# Configure the initial monitor handshake to succeed but the
# first or second background minPoolSize establishments to fail.
Expand All @@ -38,7 +38,7 @@ tests:
- hello
- isMaster
appName: SDAMminPoolSizeError
closeConnection: true
errorCode: 91
- name: createEntities
object: testRunner
arguments:
Expand All @@ -54,6 +54,7 @@ tests:
heartbeatFrequencyMS: 10000
appname: SDAMminPoolSizeError
minPoolSize: 10
serverMonitoringMode: poll
serverSelectionTimeoutMS: 1000
- database:
id: &database database
Expand Down
132 changes: 132 additions & 0 deletions spec/spec_tests/data/sdam_unified/pool-clear-min-pool-size-error.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
---
# Unified-format SDAM tests verifying how the connection pool reacts to
# errors raised while it is being populated in the background to reach
# minPoolSize: a post-handshake (authentication) error clears the pool,
# while a pre-handshake (network) error does not.
description: pool-cleared-on-min-pool-size-population-error

schemaVersion: "1.4"

runOnRequirements:
  # failCommand appName requirements
  - minServerVersion: "4.4"
    serverless: forbid
    topologies: [ single ]

createEntities:
  - client:
      id: &setupClient setupClient
      useMultipleMongoses: false

tests:
  - description: Pool is cleared on authentication error during minPoolSize population
    runOnRequirements:
      # failCommand appName requirements
      - auth: true
    operations:
      # Fail the first saslContinue (error code 18: AuthenticationFailed)
      # for connections created by the test client, so background
      # minPoolSize population hits an auth error after the handshake.
      - name: failPoint
        object: testRunner
        arguments:
          client: *setupClient
          failPoint:
            configureFailPoint: failCommand
            mode:
              times: 1
            data:
              failCommands:
                - saslContinue
              appName: authErrorTest
              errorCode: 18
      - name: createEntities
        object: testRunner
        arguments:
          entities:
            - client:
                id: &client client
                observeEvents:
                  - poolReadyEvent
                  - poolClearedEvent
                  - connectionClosedEvent
                uriOptions:
                  appname: authErrorTest
                  minPoolSize: 1

      - name: waitForEvent
        object: testRunner
        arguments:
          client: *client
          event:
            poolReadyEvent: {}
          count: 1

      # An auth error during population must clear the pool...
      - name: waitForEvent
        object: testRunner
        arguments:
          client: *client
          event:
            poolClearedEvent: {}
          count: 1

      # ...and close the failed connection.
      - name: waitForEvent
        object: testRunner
        arguments:
          client: *client
          event:
            connectionClosedEvent: {}
          count: 1

  - description: Pool is not cleared on handshake error during minPoolSize population
    operations:
      # Fail handshakes (hello/isMaster) with a network error for the
      # test client's connections. skip: 1 lets the monitor's initial
      # handshake succeed so the pool reaches the ready state first.
      - name: failPoint
        object: testRunner
        arguments:
          client: *setupClient
          failPoint:
            configureFailPoint: failCommand
            mode:
              skip: 1 # skip one so the monitoring thread can move the pool to the ready state
            data:
              failCommands:
                - hello
                - isMaster
              appName: authErrorTest
              closeConnection: true

      - name: createEntities
        object: testRunner
        arguments:
          entities:
            - client:
                id: &client client
                observeEvents:
                  - poolReadyEvent
                  - poolClearedEvent
                  - connectionClosedEvent
                uriOptions:
                  appname: authErrorTest
                  minPoolSize: 5
                  maxConnecting: 1
                  # ensure that once we've connected to the server, the failCommand won't
                  # be triggered by monitors and will only be triggered by handshakes
                  serverMonitoringMode: poll
                  heartbeatFrequencyMS: 1000000

      - name: waitForEvent
        object: testRunner
        arguments:
          client: *client
          event:
            poolReadyEvent: {}
          count: 1

      # The failed handshake closes the connection...
      - name: waitForEvent
        object: testRunner
        arguments:
          client: *client
          event:
            connectionClosedEvent: {}
          count: 1

      # ...but a pre-handshake network error must NOT clear the pool.
      - name: assertEventCount
        object: testRunner
        arguments:
          client: *client
          event:
            poolClearedEvent: {}
          count: 0
Loading