#### mdsOpt
# Total retry time budget for RPCs with MDS (ms)
mdsOpt.mdsMaxRetryMS=16000
# Maximum RPC timeout when communicating with MDS.
# The exponentially backed-off timeout cannot exceed this value
mdsOpt.rpcRetryOpt.maxRPCTimeoutMS=2000
# RPC timeout for a single request to MDS
mdsOpt.rpcRetryOpt.rpcTimeoutMs=500
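# A rough illustration of how these knobs interact (the exact backoff factor is
# implementation-defined; doubling is assumed here for illustration only):
# attempt 1 uses rpcTimeoutMs=500, attempt 2 ~1000ms, attempt 3 ~2000ms, after which
# the timeout is capped by maxRPCTimeoutMS=2000; retries stop once the total retry
# time reaches mdsMaxRetryMS=16000.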
# Sleep interval before each RPC retry with MDS
mdsOpt.rpcRetryOpt.rpcRetryIntervalUS=50000
# Switch to another MDS address when the number of consecutive failures on the current MDS exceeds this limit.
# The failure count includes timeout retries
mdsOpt.rpcRetryOpt.maxFailedTimesBeforeChangeAddr=2
# Number of normal retries before the wait strategy is triggered
mdsOpt.rpcRetryOpt.normalRetryTimesBeforeTriggerWait=3
# Sleep interval (ms) when the wait strategy is triggered
mdsOpt.rpcRetryOpt.waitSleepMs=1000
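# Putting the retry knobs together (a sketch based on the comments above, not a
# definitive description of the client code): each failed attempt sleeps
# rpcRetryIntervalUS=50000 (50ms) before retrying; after
# maxFailedTimesBeforeChangeAddr=2 consecutive failures the client switches to the
# next MDS address; and once normalRetryTimesBeforeTriggerWait=3 normal retries are
# exhausted, it falls back to the wait strategy, sleeping waitSleepMs=1000 between
# attempts.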
mdsOpt.rpcRetryOpt.addrs=127.0.0.1:6700,127.0.0.1:6701,127.0.0.1:6702 # __ANSIBLE_TEMPLATE__ {{ groups.mds | join_peer(hostvars, "mds_listen_port") }} __ANSIBLE_TEMPLATE__
#
# lease options
#
# client and mds lease time, default is 20s
mds.leaseTimesUs=20000000
mds.refreshTimesPerLease=5
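# With the defaults above, the lease is 20,000,000us (20s) and is refreshed
# refreshTimesPerLease=5 times per lease, i.e. roughly every 4s (assuming the
# refreshes are spaced evenly across the lease period).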
#### metaCacheOpt
# Number of retries to get the leader
metaCacheOpt.metacacheGetLeaderRetry=3
# Sleep interval before each get-leader retry
metaCacheOpt.metacacheRPCRetryIntervalUS=100000
# RPC timeout of get leader
metaCacheOpt.metacacheGetLeaderRPCTimeOutMS=1000
#### executorOpt
# executorOpt: options for RPCs with the metaserver
# rpc retry times with the metaserver
executorOpt.maxRetry=4294967295
# internal rpc retry times with metaserver
executorOpt.maxInternalRetry=3
# Retry sleep time between failed RPCs
executorOpt.retryIntervalUS=100000
# RPC timeout for communicating with metaserver
executorOpt.rpcTimeoutMS=1000
# RPC stream idle timeout
executorOpt.rpcStreamIdleTimeoutMS=500
# Maximum RPC timeout for retried requests.
# The timeout follows an exponential backoff policy:
# since timeouts usually occur when the network is congested, the RPC timeout is increased on each retry
executorOpt.maxRPCTimeoutMS=8000
# Maximum sleep time between retry requests.
# When the network is congested or the metaserver is overloaded,
# the sleep time should be increased
executorOpt.maxRetrySleepIntervalUS=8000000
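# Worked example of the retry policy above (assuming the timeout and the sleep
# interval roughly double on each retry, which is an illustration rather than the
# exact implementation): RPC timeouts grow 1000 -> 2000 -> 4000 -> 8000ms and are
# then capped by maxRPCTimeoutMS=8000; the sleep between retries grows from
# retryIntervalUS=100000 (100ms) up to maxRetrySleepIntervalUS=8000000 (8s).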
executorOpt.minRetryTimesForceTimeoutBackoff=5
executorOpt.maxRetryTimesBeforeConsiderSuspend=20
# batch limit of get inode attr and xattr
executorOpt.batchInodeAttrLimit=10000
#### bdev
# curve client's config file
bdev.confPath=/etc/curve/client.conf
#### extentManager
extentManager.preAllocSize=65536
#### brpc
# close the socket after |defer.close.second| seconds
rpc.defer.close.second=1
# rpc health check interval in seconds; 0 or a negative value disables the health check
rpc.healthCheckIntervalSec=0
#### fuseClient
# TODO(xuchaojie): add unit
fuseClient.listDentryLimit=65536
fuseClient.downloadMaxRetryTimes=3
### kvcache opt
fuseClient.supportKVcache=false
fuseClient.setThreadPool=4
fuseClient.getThreadPool=4
# You should enable this when mounting one filesystem at multiple mountpoints;
# it guarantees file consistency after rename. Otherwise, you should
# disable it for better performance.
fuseClient.enableMultiMountPointRename=true
# the rename transaction models differ between version 1 and version 2;
# v2 greatly improves rename performance, especially in concurrent scenarios.
# Note: v1 and v2 are incompatible, and a v1 cluster cannot be directly upgraded to v2.
fuseClient.txVersion=1
# splice brings higher performance in some cases,
# but there might be a kernel issue that causes a kernel panic when it is enabled,
# see https://lore.kernel.org/all/CAAmZXrsGg2xsP1CK+cbuEMumtrqdvD-NKnWzhNcvn71RV3c1yw@mail.gmail.com/
# until this issue has been fixed, splice should be disabled
fuseClient.enableSplice=false
# number of listDentry threads when getting the summary xattr
fuseClient.listDentryThreads=10
# default data (s3ChunkInfo/volumeExtent) size in an inode; if exceeded, the data is evicted and the merged version is fetched
fuseClient.maxDataSize=1024
# default refresh data interval 30s
fuseClient.refreshDataIntervalSec=30
fuseClient.warmupThreadsNum=10
# the write throttle bps of fuseClient, default no limit
fuseClient.throttle.avgWriteBytes=0
# the write burst bps of fuseClient, default no limit
fuseClient.throttle.burstWriteBytes=0
# how long (seconds) the write burst bps can last, default 180s
fuseClient.throttle.burstWriteBytesSecs=180
# the write throttle iops of fuseClient, default no limit
fuseClient.throttle.avgWriteIops=0
# the write burst iops of fuseClient, default no limit
fuseClient.throttle.burstWriteIops=0
# how long (seconds) the write burst iops can last, default 180s
fuseClient.throttle.burstWriteIopsSecs=180
# the read throttle bps of fuseClient, default no limit
fuseClient.throttle.avgReadBytes=0
# the read burst bps of fuseClient, default no limit
fuseClient.throttle.burstReadBytes=0
# how long (seconds) the read burst bps can last, default 180s
fuseClient.throttle.burstReadBytesSecs=180
# the read throttle iops of fuseClient, default no limit
fuseClient.throttle.avgReadIops=0
# the read burst Iops of fuseClient, default no limit
fuseClient.throttle.burstReadIops=0
# how long (seconds) the read burst iops can last, default 180s
fuseClient.throttle.burstReadIopsSecs=180
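# Hypothetical example of combining these throttle knobs (the values below are
# illustrative, not recommendations): setting
#   fuseClient.throttle.avgWriteBytes=104857600    (sustained 100 MiB/s)
#   fuseClient.throttle.burstWriteBytes=209715200  (bursts up to 200 MiB/s)
#   fuseClient.throttle.burstWriteBytesSecs=180
# would allow writes to burst to roughly 200 MiB/s for up to 180s before being
# limited back to the 100 MiB/s average.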
#### vfs (virtual filesystem)
#{
vfs.userPermission.uid=0
vfs.userPermission.gids=0
vfs.userPermission.umask=0022
vfs.entryCache.lruSize=2000000
vfs.attrCache.lruSize=2000000
#}
#### filesystem metadata
# {
# fs.disableXAttr:
#   if you want better metadata performance,
#   you can mount the fs with |fs.disableXAttr| set to true
#
# fs.lookupCache.negativeTimeoutSec:
#   entries that are not found will be cached if |timeout| > 0
#   (see the example at the end of this block)
fs.cto=true
fs.maxNameLength=255
fs.disableXAttr=true
fs.accessLogging=true
fs.kernelCache.attrTimeoutSec=3600
fs.kernelCache.dirAttrTimeoutSec=3600
fs.kernelCache.entryTimeoutSec=3600
fs.kernelCache.dirEntryTimeoutSec=3600
fs.lookupCache.negativeTimeoutSec=0
fs.lookupCache.minUses=1
fs.lookupCache.lruSize=100000
fs.dirCache.lruSize=5000000
fs.openFile.lruSize=65536
fs.attrWatcher.lruSize=5000000
fs.rpc.listDentryLimit=65536
fs.deferSync.delay=3
fs.deferSync.deferDirMtime=false
# }
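# Example (illustrative values, assuming the semantics described above): a
# metadata-heavy workload might mount with
#   fs.disableXAttr=true                  # skip extended-attribute overhead
#   fs.lookupCache.negativeTimeoutSec=1   # cache "not found" lookups for 1s
# so that repeated lookups of missing paths are answered from the negative cache
# instead of hitting the metadata service again.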
#### volume
volume.bigFileSize=1048576
volume.volBlockSize=4096
volume.fsBlockSize=4096
# allocator type; supported types: {bitmap}
volume.allocator.type=bitmap
## for bitmap allocator
# space represented by each bit, default is 4MiB
volume.bitmapAllocator.sizePerBit=4194304
# small allocation proportion [0-1]
volume.bitmapAllocator.smallAllocProportion=0.2
# number of block groups allocated at once
volume.blockGroup.allocateOnce=4
## spaceserver
# when the used space of a blockgroup exceeds this ratio, it can
# be returned to mds [0.8-1]
volume.space.useThreshold=0.95
# the interval (seconds) at which the background thread checks whether
# blockgroups should be returned to mds
volume.space.releaseInterSec=300
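# In other words (a sketch of the flow described above): every
# volume.space.releaseInterSec=300 seconds the background thread scans blockgroups,
# and any blockgroup whose used space exceeds volume.space.useThreshold=0.95 (95%)
# becomes a candidate to be returned to mds. For a hypothetical 128MiB blockgroup
# that would mean more than about 121.6MiB in use.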
#### s3
# this is for testing only; if s3.fakeS3=true, all data will be discarded
s3.fakeS3=false
s3.pageSize=65536
# number of blocks to prefetch for the disk cache
s3.prefetchBlocks=1
# prefetch threads
s3.prefetchExecQueueNum=1
# writes start to sleep when the mem cache usage ratio is greater than nearfullRatio;
# the sleep time increases with the mem cache usage ratio, with baseSleepUs as the baseline.
s3.nearfullRatio=70
s3.baseSleepUs=500
# background thread schedule time
s3.threadScheduleInterval=3
# data cache flush wait time
s3.cacheFlushIntervalSec=5
# write cache size; values smaller than 8,388,608 bytes (8MB) are not allowed
s3.writeCacheMaxByte=838860800
s3.readCacheMaxByte=209715200
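# For reference, the defaults above translate to 838860800 bytes = 800MiB of write
# cache and 209715200 bytes = 200MiB of read cache (1MiB = 1048576 bytes); the write
# cache may not be configured below 8388608 bytes (8MiB).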
# file cache read thread num
s3.readCacheThreads=5
# whether data in the cache cluster is downloaded to the local disk cache
s3.memClusterToLocal=true
# whether data in s3 storage is downloaded to the local disk cache
s3.s3ToLocal=true
# reads larger than this value will wait until the prefetch is finished
s3.bigIoSize=131072
# retry times when a big IO read fails
s3.bigIoRetryTimes=100
# retry interval when a big IO read fails
s3.bigIoRetryIntervalUs=100
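# Worked example with the defaults above: a read of at least bigIoSize=131072 bytes
# (128KiB) that has to wait for prefetch will retry up to bigIoRetryTimes=100 times
# with bigIoRetryIntervalUs=100 between attempts, i.e. roughly 10ms of retry sleep in
# the worst case (not counting the time of the reads themselves).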
# http = 0, https = 1
s3.http_scheme=0
s3.verify_SSL=False
s3.user_agent=S3 Browser
s3.region=us-east-1
s3.maxConnections=500
s3.connectTimeout=60000
s3.requestTimeout=10000
# Off = 0, Fatal = 1, Error = 2, Warn = 3, Info = 4, Debug = 5, Trace = 6
s3.logLevel=4
s3.logPrefix=/data/logs/curvefs/aws_ # __CURVEADM_TEMPLATE__ /curvefs/client/logs/aws_ __CURVEADM_TEMPLATE__
s3.asyncThreadNum=500
# limit all inflight async requests' bytes, |0| means not limited
s3.maxAsyncRequestInflightBytes=1073741824
s3.chunkFlushThreads=5
# throttle
s3.throttle.iopsTotalLimit=0
s3.throttle.iopsReadLimit=0
s3.throttle.iopsWriteLimit=0
s3.throttle.bpsTotalMB=0
s3.throttle.bpsReadMB=0
s3.throttle.bpsWriteMB=0
s3.useVirtualAddressing=false
# The retry interval after a read failure grows larger and larger;
# once it reaches the max, retries are performed at this fixed interval.
s3.maxReadRetryIntervalMs=1000
# initial retry interval
s3.readRetryIntervalMs=100
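# Illustration (assuming the interval roughly doubles each time, which is an
# assumption rather than the documented policy): retries would wait 100, 200, 400,
# 800ms and then stay fixed at maxReadRetryIntervalMs=1000.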
# TODO(hongsong): limit bytes, iops/bps
#### disk cache options
# 0: disable disk cache
# 1: read-only
# 2: read/write
diskCache.diskCacheType=2 # __ANSIBLE_TEMPLATE__ {{ client_disk_cache_type | default('2') }} __ANSIBLE_TEMPLATE__
# whether the file system flushes files when writing them or not
diskCache.forceFlush=true
# interval (seconds) of the check that trims the disk cache
diskCache.trimCheckIntervalSec=5
# interval (ms) of the check that asynchronously loads files to s3
diskCache.asyncLoadPeriodMs=5
#        ok                 nearfull                full
# |------------|-------------------|----------------------|
# 0     trimRatio*safeRatio     safeRatio         fullRatio
#
# 1. ok:       0 <= usage < trimRatio*safeRatio
# 2. nearfull: trimRatio*safeRatio <= usage < safeRatio
# 3. full:     safeRatio <= usage <= fullRatio
# If the status is ok, or goes from ok to nearfull, no cleanup is performed.
# If the status is full, or goes from full to nearfull, cleanup is performed.
diskCache.fullRatio=90
diskCache.safeRatio=70
diskCache.trimRatio=50
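# With the defaults above, trimRatio*safeRatio = 50% * 70% = 35%, so the disk cache
# is "ok" below 35% usage, "nearfull" between 35% and 70%, and "full" from 70% up to
# the 90% hard limit; once cleanup starts in the full state it presumably continues
# through nearfull until usage drops back below 35%.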
diskCache.threads=5
# the max space (bytes) the disk cache can use
diskCache.maxUsableSpaceBytes=107374182400
# the max number of files that can be cached
diskCache.maxFileNums=1000000
# the max time (seconds) a system command can run
diskCache.cmdTimeoutSec=300
# directory of disk cache
diskCache.cacheDir=/mnt/curvefs_cache # __CURVEADM_TEMPLATE__ /curvefs/client/data/cache __CURVEADM_TEMPLATE__ __ANSIBLE_TEMPLATE__ /mnt/curvefs_disk_cache/{{ 99999999 | random | to_uuid | upper }} __ANSIBLE_TEMPLATE__
# the write throttle bps of disk cache, default no limit
diskCache.avgFlushBytes=0
# the write burst bps of disk cache, default no limit
diskCache.burstFlushBytes=0
# how long (seconds) the write burst bps can last, default 180s
diskCache.burstSecs=180
# the write throttle iops of disk cache, default no limit
diskCache.avgFlushIops=0
# the read throttle bps of disk cache, default no limit
diskCache.avgReadFileBytes=0
# the read throttle iops of disk cache, default no limit
diskCache.avgReadFileIops=0
#### common
client.common.logDir=/data/logs/curvefs # __CURVEADM_TEMPLATE__ /curvefs/client/logs __CURVEADM_TEMPLATE__
# supported log levels: {0,3,6,9}
# as the number increases, logging becomes more and more detailed
client.loglevel=0
client.dummyServer.startPort=9000