RabbitMQ Clustering with 1 Master & 5 Nodes
Recently I deployed and implemented RabbitMQ with 1 master and 2 nodes (as slaves). Is it possible to cluster RabbitMQ with 1 master and 5 nodes? For the record, in the 1-master/2-node setup I used the commands below and they worked like a charm.
sudo rabbitmqctl set_policy ha-all ".*" '{"ha-mode":"all"}'
sudo rabbitmqctl set_policy ha-two "^two\." \
'{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}'
sudo rabbitmqctl set_policy ha-nodes "^nodes\." \
'{"ha-mode":"nodes","ha-params":["rabbit@node02", "rabbit@node03"]}'
RabbitMQ Clustering (1 Master {node01} and 2 Slaves {node02, node03})
What changes should I make to the commands above to achieve this, i.e. RabbitMQ with 1 master and 5 nodes? Thank you so much for helping me.
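For reference, here is a sketch of how the same policies might be adapted once the extra nodes have joined the cluster; the host names node04 to node06 and the queue-name prefixes are assumptions, not part of the original setup.

# "ha-all" already mirrors every matching queue to all nodes that join the
# cluster, so it needs no change when more nodes are added.
sudo rabbitmqctl set_policy ha-all ".*" '{"ha-mode":"all"}'

# "exactly" counts the master too: 6 keeps a copy on all six machines
# (1 master + 5 mirrors); use a smaller number to mirror to only some of them.
sudo rabbitmqctl set_policy ha-six "^six\." \
    '{"ha-mode":"exactly","ha-params":6,"ha-sync-mode":"automatic"}'

# "nodes" pins mirrors to an explicit node list; extend the list with the
# new (hypothetical) node names.
sudo rabbitmqctl set_policy ha-nodes "^nodes\." \
    '{"ha-mode":"nodes","ha-params":["rabbit@node02","rabbit@node03","rabbit@node04","rabbit@node05","rabbit@node06"]}'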
See also questions close to this topic
- Stomp in Javascript class, subscribe not working?
Good day,
I wanted to extend my old JavaScript knowledge, and I saw that JavaScript has supported classes for quite a while now. So I wanted to write a class for a chat with RabbitMQ (STOMP).
Everything works except the subscribe function. I know that the class is not perfect, but I wanted to practice. Here is my code:
class Chat {
    constructor() {
        this.ws = new WebSocket('ws://127.0.0.1:15674/ws');
        this.subscription = null;
        this.client = null;
        this.connect();
    }

    connect() {
        this.client = Stomp.over(this.ws);
        this.client.connect('guest', 'guest', this.on_connect, this.on_error, '/');
        this.client.heartbeat.outgoing = 20000; // client will send heartbeats every 20000ms
        this.client.heartbeat.incoming = 0;     // client does not want to receive heartbeats from the server
    }

    on_connect() {
        this.subscribe('/queue/demouser');
    }

    on_error(error) {
        console.log('Connection failed');
        console.log(error);
        output.innerHTML += 'Connection failed!<br />';
    }

    send(queue, message) {
        this.connect();
        this.client.send(queue, {}, JSON.stringify(message));
    }

    subscribe(name) {
        this.client.subscribe(name, function (message) {
            console.log(message.body);
            $('#output').append(message.body + '<br />');
        });
    }
}
In the browser I see that I receive a message
Unhandled received MESSAGE: MESSAGE content-length:4 redelivered:false message-id:T_sub-0@@session-Deu_SSuxxVnd74oFnPfUIA@@1 destination:/exchange/demouser/demouser subscription:sub-0 content-length:4 test
Edit:
When I take the functional approach, it works...
// Connection parameters
var mq_username = "guest",
    mq_password = "guest",
    mq_vhost    = "/",
    mq_url      = 'ws://127.0.0.1:15674/ws',
    mq_queue    = "/exchange/superadmin";

// This is where we print incoming messages
var output;

// This will be called upon successful connection
function on_connect() {
    output.innerHTML += 'Connected to RabbitMQ-Web-Stomp<br />';
    console.log(client);
    client.subscribe(mq_queue, on_message);
}

// This will be called upon a connection error
function on_connect_error() {
    output.innerHTML += 'Connection failed!<br />';
}

// This will be called upon arrival of a message
function on_message(m) {
    console.log('message received');
    console.log(m);
    output.innerHTML += m.body + '<br />';
}

// Fetch output panel
output = document.getElementById("output");

// Create a client
var ws = new WebSocket(mq_url);
var client = Stomp.over(ws);

// Connect
client.connect(mq_username, mq_password, on_connect, on_connect_error, mq_vhost);

client.send('/queue/demouser', {test: "test"});
var sub = client.subscribe("/queue/superadmin", on_message);

$('.chatsubmit').on('click', function () {
    let message = $('.inputfield').val();
    client.send('/exchange/conversation.outgoing/superadmin', message);
    $('.output').innerHTML += message.toString() + '<br />';
    $('.inputfield').val('');
});
Browser console:
<<< MESSAGE subscription:sub-0 destination:/exchange/conversation.outgoing/demouser message-id:T_sub-0@@session-jVUaOO5RvArCSrzSl7K2vw@@3 redelivered:false content-length:4 test� stomp.min.js:8:1739 message received
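One plausible reason why the class-based version behaves differently (a guess, not a confirmed diagnosis): Stomp invokes on_connect without the Chat instance bound as this, so this.subscribe inside on_connect does not point at the class. A minimal, hypothetical variant of the connect method with the callbacks bound, everything else in the class assumed unchanged:

connect() {
    this.client = Stomp.over(this.ws);
    this.client.heartbeat.outgoing = 20000; // send heartbeats every 20000 ms
    this.client.heartbeat.incoming = 0;     // do not request heartbeats from the server
    // Arrow functions (or .bind(this)) keep `this` pointing at the Chat instance
    // when the STOMP client invokes the callbacks.
    this.client.connect('guest', 'guest',
        () => this.on_connect(),
        (error) => this.on_error(error),
        '/');
}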
- Distributed Messaging: Bus vs Broker
- Doc rabbitmq memory usage issue
[root@osc /]# /usr/local/rabbitmq-3.6.5/sbin/rabbitmqctl status
Status of node rabbit@osc ...
[{pid,162150},
 {running_applications,
     [{rabbitmq_top,"RabbitMQ Top","3.6.5"},
      {rabbitmq_management,"RabbitMQ Management Console","3.6.5"},
      {rabbitmq_management_agent,"RabbitMQ Management Agent","3.6.5"},
      {rabbitmq_web_dispatch,"RabbitMQ Web Dispatcher","3.6.5"},
      {webmachine,"webmachine","1.10.3"},
      {mochiweb,"MochiMedia Web Server","2.13.1"},
      {inets,"INETS CXC 138 49","6.3"},
      {ssl,"Erlang/OTP SSL application","8.0"},
      {public_key,"Public key infrastructure","1.2"},
      {amqp_client,"RabbitMQ AMQP Client","3.6.5"},
      {syntax_tools,"Syntax tools","2.0"},
      {compiler,"ERTS CXC 138 10","7.0"},
      {asn1,"The Erlang ASN1 compiler version 4.0.3","4.0.3"},
      {crypto,"CRYPTO","3.7"},
      {rabbit,"RabbitMQ","3.6.5"},
      {ranch,"Socket acceptor pool for TCP protocols.","1.2.1"},
      {os_mon,"CPO CXC 138 46","2.4.1"},
      {mnesia,"MNESIA CXC 138 12","4.14"},
      {rabbit_common,[],"3.6.5"},
      {xmerl,"XML parser","1.3.11"},
      {sasl,"SASL CXC 138 11","3.0"},
      {stdlib,"ERTS CXC 138 10","3.0"},
      {kernel,"ERTS CXC 138 10","5.0"}]},
 {os,{unix,linux}},
 {erlang_version,
     "Erlang/OTP 19 [erts-8.0] [source] [64-bit] [smp:12:12] [async-threads:192] [hipe] [kernel-poll:true]\n"},
 {memory,
     [{total,2149246176},
      {connection_readers,2980888},
      {connection_writers,316032},
      {connection_channels,1040720},
      {connection_other,2950312},
      {queue_procs,2395096},
      {queue_slave_procs,0},
      {plugins,5075776},
      {other_proc,14218200},
      {mnesia,442952},
      {mgmt_db,10889752},
      {msg_index,149160},
      {other_ets,2904120},
      {binary,2064189840},        <--------- 2GB???? why? queues is only use 2.7M
      {code,24809744},
      {atom,1033401},
      {other_system,15850183}]},
 {alarms,[]},
 {listeners,[{clustering,25672,"::"},{amqp,5672,"::"}]},
 {vm_memory_high_watermark,0.4},
 {vm_memory_limit,13456659251},
 {disk_free_limit,50000000},
 {disk_free,215603683328},
 {file_descriptors,
     [{total_limit,63900},
      {total_used,128},
      {sockets_limit,57508},
      {sockets_used,123}]},
 {processes,[{limit,1048576},{used,1892}]},
 {run_queue,0},
 {uptime,28157},
 {kernel,{net_ticktime,60}}]
The binary memory is so high, so I followed https://www.rabbitmq.com/memory-use.html#breakdown-binaries
/usr/local/rabbitmq-3.6.5/sbin/rabbitmqctl eval 'recon:bin_leak(10).'
Error: {undef,[{recon,bin_leak,"\n",[]},
               {erl_eval,do_apply,6,[{file,"erl_eval.erl"},{line,670}]},
               {rpc,'-handle_call_call/6-fun-0-',5,
                    [{file,"rpc.erl"},{line,187}]}]}
- How can I release this memory? The queues use only about 2.7 MB, but binaries use 2 GB (a memory leak?). See the sketch after this list.
- Why does the /usr/local/rabbitmq-3.6.5/sbin/rabbitmqctl eval 'recon:bin_leak(10).' command fail?
- plugin
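As a hedged first step that is often suggested for high binary memory, one can force a garbage collection of every Erlang process on the node; it only frees binaries that nothing references any more, so if memory stays high the binaries are genuinely held (often by connections, channels or the management database). The undef error, separately, indicates that the recon module is simply not available on that Erlang node.

# Sketch: force a full GC sweep over every process of the running node.
/usr/local/rabbitmq-3.6.5/sbin/rabbitmqctl eval '[garbage_collect(P) || P <- erlang:processes()], ok.'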
- How can I implement pam clustering algorithm using gower distance in sklearn?
I would like to implement the PAM algorithm (KMedoids, method='pam') using the Gower distance.
My dataset contains mixed features, numeric and categorical; several categorical features have 1000+ distinct values.
I have found a suitable gower distance implementation here: https://github.com/wwwjk366/gower/blob/master/gower/gower_dist.py
My problem is that the sklearn-extra implementation of PAM I use does not have a metric='gower' option. So I tried to create a callable, but I find it hard to plug things together:

D = gower.gower_matrix(df_ext, cat_features=cat_mask)
# cat_mask is a boolean list marking which features in df_ext are categorical
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
def get_gower():
    return sklearn.metrics.pairwise_distances(D, metric='precomputed')

# https://scikit-learn-extra.readthedocs.io/en/latest/generated/sklearn_extra.cluster.KMedoids.html
kmedoids = sklearn_extra.cluster.KMedoids(df_ext, metric=get_gower, method='pam')
kmedoids.fit(df_ext)
I get this ValueError:
ValueError                                Traceback (most recent call last)
<ipython-input-13-9ae677cd636a> in <module>
      1 # https://scikit-learn-extra.readthedocs.io/en/latest/generated/sklearn_extra.cluster.KMedoids.html
      2 kmedoids = KMedoids(df_ext, metric=get_gower, method='pam')
----> 3 kmedoids.fit(df_ext)

D:\ProgramFiles\anaconda3\lib\site-packages\sklearn_extra\cluster\_k_medoids.py in fit(self, X, y)
    183         random_state_ = check_random_state(self.random_state)
    184
--> 185         self._check_init_args()
    186         X = check_array(X, accept_sparse=["csr", "csc"])
    187         if self.n_clusters > X.shape[0]:

D:\ProgramFiles\anaconda3\lib\site-packages\sklearn_extra\cluster\_k_medoids.py in _check_init_args(self)
    154
    155         # Check n_clusters and max_iter
--> 156         self._check_nonnegative_int(self.n_clusters, "n_clusters")
    157         self._check_nonnegative_int(self.max_iter, "max_iter", False)
    158

D:\ProgramFiles\anaconda3\lib\site-packages\sklearn_extra\cluster\_k_medoids.py in _check_nonnegative_int(self, value, desc, strict)
    144         else:
    145             negative = (value is None) or (value < 0)
--> 146         if negative or not isinstance(value, (int, np.integer)):
    147             raise ValueError(
    148                 "%s should be a nonnegative integer. "

D:\ProgramFiles\anaconda3\lib\site-packages\pandas\core\generic.py in __nonzero__(self)
   1327
   1328     def __nonzero__(self):
-> 1329         raise ValueError(
   1330             f"The truth value of a {type(self).__name__} is ambiguous. "
   1331             "Use a.empty, a.bool(), a.item(), a.any() or a.all()."

ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
I think I have a problem with my callable. Do you have any ideas about what I am doing wrong?
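For what it's worth, the traceback shows the DataFrame ending up in n_clusters, because df_ext is passed as the first positional argument of KMedoids (which is n_clusters). A hedged sketch of one way to wire this together with a precomputed Gower matrix; the value n_clusters=5 is an arbitrary assumption:

import gower
from sklearn_extra.cluster import KMedoids

# Precompute the pairwise Gower distance matrix once; shape (n_samples, n_samples).
D = gower.gower_matrix(df_ext, cat_features=cat_mask)

# Declare the matrix as precomputed distances and fit on D rather than on df_ext.
kmedoids = KMedoids(n_clusters=5, metric='precomputed', method='pam', random_state=0)
labels = kmedoids.fit_predict(D)

# medoid_indices_ refers back to row positions of df_ext.
print(kmedoids.medoid_indices_)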
- Get percentage of remaining users after a specified date in R
I am working with a user-generated data set (say it is app or service usage data), and I cluster it based on user behaviour characteristics, i.e. frequency of use. I would like to see how many users, or what percentage of users, stop using the app/service after a specific date, and which cluster they come from.
Here is a reproducible example which I hope is appropriate:
library(Pareto)
library(uuid)
library(ggplot2)
library(tidyverse)
library(data.table)

set.seed(1)
n_users <- 100
n_rows <- 3650

relative_probs <- rPareto(n = n_users, t = 1, alpha = 0.3, truncation = 500)
unique_ids <- UUIDgenerate(n = n_users)
id_sample <- sample(unique_ids, size = n_rows, prob = relative_probs, replace = TRUE)

Date <- seq(as.Date("2015-01-01"), as.Date("2015-12-31"), by = "1 day")
Date <- sample(Date, size = n_rows, replace = T)

df <- data.frame(id_sample, Date)
df
df <- df %>% arrange(Date)

userData <- df %>%
  group_by(id_sample) %>%
  summarise(Count = n())

scaledData <- scale(userData[, 2])
scaledData

set.seed(15)
clust <- kmeans(scaledData, centers = 5, nstart = 15)
userData$Cluster <- clust$cluster

setDT(userData)[Cluster == 1, ClusterName := "Cluster 1"]
userData[Cluster == 2, ClusterName := "Cluster 2"]
userData[Cluster == 3, ClusterName := "Cluster 3"]
userData[Cluster == 4, ClusterName := "Cluster 4"]
userData[Cluster == 5, ClusterName := "Cluster 5"]

user_vars <- userData %>% select(id_sample, ClusterName)
df <- merge(df, user_vars, by = "id_sample")
df$Month <- lubridate::month(df$Date)

df %>%
  group_by(Month) %>%
  summarise(N_Users = n_distinct(id_sample))
I am wondering whether there is a dplyr solution, or something similar, where I could set a date as a threshold and see how many users (as a percentage or count) that were in the data before the specified date still appear after it, at cluster level. For instance, a solution that captures all the unique user IDs prior to the specified date, with the outcome showing what percentage of those users are still present in the data after that date, grouped by cluster.
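A hedged dplyr sketch of one way this could look, continuing from the df built above; the cutoff date is an arbitrary assumption:

cutoff <- as.Date("2015-07-01")  # hypothetical threshold date

# Users (and their clusters) seen before the cutoff
before_users <- df %>%
  filter(Date < cutoff) %>%
  distinct(id_sample, ClusterName)

# IDs of users still seen on or after the cutoff
after_ids <- df %>%
  filter(Date >= cutoff) %>%
  distinct(id_sample) %>%
  pull(id_sample)

# Per cluster: how many pre-cutoff users remain, as a count and a percentage
before_users %>%
  group_by(ClusterName) %>%
  summarise(
    n_before     = n(),
    n_retained   = sum(id_sample %in% after_ids),
    pct_retained = 100 * n_retained / n_before
  )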
- Subscript out of bounds error when plotting clusters using dtwclust package in R
I have a matrix which stores time series data. The following is a sample of the data; however, the full dataset contains 51 rows (all 50 states plus DC) and 19 columns (x2002 to x2020).
values <- c(0.2530405, .4177630, 0.2148716, 0.2813618, 0.3428116, 0.2632559, 0.3446410, 0.2098437,
            -0.4695727, -0.3565306, -0.3289565, -0.3368127, -0.3592110, -0.3224375, -0.3566633, -0.4619120,
            -0.3109503, -0.4533174, -0.3730507, -0.2361796, -0.2267539, -0.3795783, -0.2444547, -0.2738204,
            2.7028753, 3.2245777, 3.0075026, 2.6390509, 2.1839651, 2.5488890, 2.6308931, 2.1176300,
            -0.3461997, -0.3703573, -0.4318429, -0.2505558, -0.2135082, -0.2795819, -0.2725068, -0.4350418,
            -0.6281951, -0.5362774, -0.5347293, -0.5668312, -0.4784224, -0.6795677, -0.5670547, -0.5559578)
rnames <- c('AL', 'AR', 'AZ', 'CA', 'CO', 'CT')
cnames <- paste0('x', 2002:2009)

ts_df <- matrix(data = values, nrow = 6, ncol = 8)
rownames(ts_df) <- rnames
colnames(ts_df) <- cnames
print(ts_df)
I am trying to plot time series clusters using the dtwclust package. With this sample of the dataset, the following runs and plots as expected:
plot(tsclust(ts_df, type="partitional", k=3L, distance="dtw", centroid="pam"), type = 'sc')
However, when the dataset is expanded to include all values, I receive the following error:
Error in types[[n]] : subscript out of bounds
Any suggestions would be greatly appreciated!
- How to have highly available Moodle in Kubernetes?
I want to set up highly available Moodle in K8s (on-prem). I'm using Bitnami Moodle with Helm charts.
After a successful Moodle installation it works, but when a K8s node goes down, the Moodle web page reverts/redirects to the Moodle installation page. It's like a loop.
Persistent storage is rook-ceph. The Moodle PVC is ReadWriteMany, while the MySQL one is ReadWriteOnce.
The following command was used to deploy Moodle.
helm install moodle --set global.storageClass=rook-cephfs,replicaCount=3,persistence.accessMode=ReadWriteMany,allowEmptyPassword=false,moodlePassword=Moodle123,mariadb.architecture=replication bitnami/moodle
Any help on this is appreciated.
Thanks.
- High-Availability not working in Hadoop cluster
I am trying to move my non-HA namenode to HA. After setting up all the configurations for the JournalNodes by following the Apache Hadoop documentation, I was able to bring the namenodes up. However, the namenodes crash immediately and throw the following error.
ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: Failed to start namenode.
java.io.IOException: There appears to be a gap in the edit log. We expected txid 43891997, but got txid 45321534.
I tried to recover the edit logs, initialize the shared edits, etc., but nothing works. I am not sure how to fix this problem without formatting the namenode, since I do not want to lose any data.
Any help is greatly appreciated. Thanks in advance.
- Apache Kafka Consume from Slave/ISR node
I understand the concept of master/slave and data replication in Kafka, but I don't understand why consumers and producers are always routed to the master node when writing to or reading from a partition, instead of being able to read from any ISR (in-sync replica)/slave.
The way I think about it, if all consumers are redirected to one single master node, then more hardware is required to handle the read/write load of large consumer groups and producers.
Is it possible to read and write on slave nodes, or will consumers/producers always reach out to the master node of that partition?
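For context, a hedged sketch of the follower-fetching feature available in newer Kafka versions (2.4+, KIP-392). Writes still always go to the partition leader; only consumers can be served by a nearby in-sync replica. The rack names below are placeholders.

# Broker configuration (server.properties)
broker.rack=rack-a
replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector

# Consumer configuration: a consumer whose client.rack matches a replica's
# broker.rack may then fetch from that follower instead of the leader.
client.rack=rack-a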