code
stringlengths
41
2.04k
label_name
stringclasses
2 values
label
int64
0
1
# Create or update a stonith (fence) device via pcs.
#
# Without params[:resource_id] a new device is created from :name and
# :resource_type; otherwise the existing device is updated with the
# given parameters.  Returns "{}" on success or a JSON error hash.
def update_fence_device(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end

  $logger.info "Updating fence device"
  $logger.info params
  param_line = getParamList(params)
  $logger.info param_line

  unless params[:resource_id]
    out, stderr, retval = run_cmd(
      session, PCS, "stonith", "create", params[:name],
      params[:resource_type], *param_line
    )
    if retval != 0
      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
    end
    return "{}"
  end

  unless param_line.empty?
    out, stderr, retval = run_cmd(
      session, PCS, "stonith", "update", params[:resource_id], *param_line
    )
    if retval != 0
      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
    end
  end
  return "{}"
end
CWE-384
1
# Add resources to a resource group via `pcs resource group add`.
#
# params: "resource_group" - target group name,
#         "resources"      - space-separated resource ids.
# Returns 200 on success, [400, errout] on pcs failure, 403 without
# WRITE permission, and 400 when required parameters are missing.
def add_group(params, request, session)
  if not allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  rg = params["resource_group"]
  resources = params["resources"]
  # Bug fix: the original called resources.split without a nil check, so a
  # request missing "resources" (or "resource_group") raised NoMethodError.
  if rg.nil? or resources.nil?
    return 400, 'resource_group and resources are required'
  end
  output, errout, retval = run_cmd(
    session, PCS, "resource", "group", "add", rg, *(resources.split(" "))
  )
  if retval == 0
    return 200
  else
    return 400, errout
  end
end
CWE-384
1
# Run crm_mon once (--one-shot) and parse its XML status output.
# Returns a REXML::Document, or nil when crm_mon fails or the output
# cannot be parsed.
def get_crm_mon_dom(session)
  stdout, _, retval = run_cmd(
    session, CRM_MON, '--one-shot', '-r', '--as-xml'
  )
  return REXML::Document.new(stdout.join("\n")) if retval == 0
  nil
rescue
  # Parsing (or joining) failed -- log and fall back to nil.
  $logger.error 'Failed to parse crm_mon.'
  nil
end
CWE-384
1
# Restore a previously backed-up pcs cluster configuration from a tarball.
#
# When params[:name] is set, the request is forwarded to that node.
# Otherwise the tarball in params[:tarball] is piped to
# `pcs config restore --local` on this node (requires FULL permission).
# Returns "Succeeded" on success, the pcs error text (or "Error") on
# failure, or "Error: Invalid tarball" for an empty/missing tarball.
def config_restore(params, request, session)
  if params[:name]
    # Forward the restore to the named node; tarball travels in the body.
    code, response = send_request_with_token(
      session, params[:name], 'config_restore', true,
      {:tarball => params[:tarball]}
    )
  else
    if not allowed_for_local_cluster(session, Permissions::FULL)
      return 403, 'Permission denied'
    end
    $logger.info "Restore node configuration"
    if params[:tarball] != nil and params[:tarball] != ""
      out = ""
      errout = ""
      # Feed the tarball to pcs on stdin; stdin is closed before reading
      # stdout/stderr so pcs sees EOF and does not block for more input.
      status = Open4::popen4(PCS, "config", "restore", "--local") { |pid, stdin, stdout, stderr|
        stdin.print(params[:tarball])
        stdin.close()
        out = stdout.readlines()
        errout = stderr.readlines()
      }
      retval = status.exitstatus
      if retval == 0
        $logger.info "Restore successful"
        return "Succeeded"
      else
        $logger.info "Error during restore: #{errout.join(' ').strip()}"
        # Prefer pcs's own error text when there is any.
        return errout.length > 0 ? errout.join(' ').strip() : "Error"
      end
    else
      $logger.info "Error: Invalid tarball"
      return "Error: Invalid tarball"
    end
  end
end
CWE-384
1
# Render the fence agent configuration form (the :fenceagentform ERB
# template).  Requires READ permission; returns a bare 200 when no
# resourcename is given.
#
# Sets instance variables consumed by the template:
#   @fenceagent     - FenceAgent filled with metadata for the agent
#   @new_fenceagent - params[:new]; presumably flags "create new" mode
#                     in the form -- confirm against the template
def fence_device_metadata(params, request, session)
  if not allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  return 200 if not params[:resourcename] or params[:resourcename] == ""
  @fenceagent = FenceAgent.new(params[:resourcename])
  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename])
  @new_fenceagent = params[:new]
  erb :fenceagentform
end
CWE-384
1
# Collect the ACL configuration from the CIB.
#
# Returns a hash keyed by 'role', 'group', 'user' and 'target':
#   role   => {role_id => {'description' =>, 'permissions' => [..]}}
#   target/group => {id => [role_id, ...]}
# 'user' is aliased to 'target' (same object).  Returns {} when the CIB
# cannot be loaded.
def get_acls(session, cib_dom=nil)
  unless cib_dom
    cib_dom = get_cib_dom(session)
    return {} unless cib_dom
  end
  acls = {'role' => {}, 'group' => {}, 'user' => {}, 'target' => {}}
  cib_dom.elements.each('/cib/configuration/acls/*') { |elem|
    kind = elem.name[4..-1]  # strip the 'acl_' prefix
    case elem.name
    when 'acl_role'
      role_id = elem.attributes['id']
      description = elem.attributes['description']
      role = {
        'description' => description || '',
        'permissions' => [],
      }
      acls[kind][role_id] = role
      elem.elements.each('acl_permission') { |perm|
        perm_id = perm.attributes['id']
        perm_kind = perm.attributes['kind']
        if perm.attributes['xpath']
          target = "xpath #{perm.attributes['xpath']}"
        elsif perm.attributes['reference']
          target = "id #{perm.attributes['reference']}"
        else
          # permission with neither xpath nor reference -- nothing to show
          next
        end
        role['permissions'] << "#{perm_kind} #{target} (#{perm_id})"
      }
    when 'acl_target', 'acl_group'
      id = elem.attributes['id']
      members = []
      elem.elements.each('role') { |role_ref| members << role_ref.attributes['id'] }
      acls[kind][id] = members
    end
  }
  acls['user'] = acls['target']
  return acls
end
CWE-384
1
# Remove the clone wrapper from a resource via `pcs resource unclone`.
# Returns 200 on success, [400, message] on failure.
def resource_unclone(params, request, session)
  return 403, 'Permission denied' unless allowed_for_local_cluster(session, Permissions::WRITE)
  resource_id = params[:resource_id]
  return [400, 'resource_id has to be specified.'] unless resource_id
  _, stderr, retval = run_cmd(session, PCS, 'resource', 'unclone', resource_id)
  unless retval == 0
    return [400, "Unable to unclone '#{resource_id}': #{stderr.join('')}"]
  end
  return 200
end
CWE-384
1
# Return the auth tokens for all known cluster nodes as JSON.
# pcsd runs as root thus always returns hacluster's tokens.
def get_cluster_tokens(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::FULL)
    return 403, "Permission denied"
  end
  online, offline = get_nodes
  all_nodes = (online + offline).uniq
  return [200, JSON.generate(get_tokens_of_nodes(all_nodes))]
end
CWE-384
1
# List fence agents installed as /usr/sbin/fence_* (excluding
# fence_ack_manual).  When fence_agent is given ("prefix:name" form
# allowed), full metadata is loaded for that one agent.
# Returns {agent_name => FenceAgent}.
def getFenceAgents(session, fence_agent = nil)
  agent_list = {}
  wanted = fence_agent ? fence_agent.sub(/.*:/, "") : nil
  Dir.glob('/usr/sbin/fence_' + '*').each { |path|
    agent = FenceAgent.new
    agent.name = path.sub(/.*\//, "")
    next if agent.name == "fence_ack_manual"  # not a real fence agent
    if wanted and agent.name == wanted
      agent.required_options, agent.optional_options,
        agent.advanced_options, agent.info =
        getFenceAgentMetadata(session, agent.name)
    end
    agent_list[agent.name] = agent
  }
  agent_list
end
CWE-384
1
# Build the cluster status JSON for the web UI.
#
# If the node list reported by the cluster differs from the one stored in
# pcs_settings.conf, the stored config is updated and synced, and the
# method re-runs itself once with dont_update_config=true to prevent
# further recursion.  Returns 403 when status is unavailable, otherwise
# the status serialized as JSON.
def cluster_status_gui(session, cluster_name, dont_update_config=false)
  cluster_nodes = get_cluster_nodes(cluster_name)
  status = cluster_status_from_nodes(session, cluster_nodes, cluster_name)
  unless status
    return 403, 'Permission denied'
  end
  # Union of every node list present in the status response.
  new_cluster_nodes = []
  new_cluster_nodes += status[:corosync_offline] if status[:corosync_offline]
  new_cluster_nodes += status[:corosync_online] if status[:corosync_online]
  new_cluster_nodes += status[:pacemaker_offline] if status[:pacemaker_offline]
  new_cluster_nodes += status[:pacemaker_online] if status[:pacemaker_online]
  new_cluster_nodes.uniq!
  if new_cluster_nodes.length > 0
    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
    if !(dont_update_config or config.cluster_nodes_equal?(cluster_name, new_cluster_nodes))
      old_cluster_nodes = config.get_nodes(cluster_name)
      $logger.info("Updating node list for: #{cluster_name} #{old_cluster_nodes}->#{new_cluster_nodes}")
      config.update_cluster(cluster_name, new_cluster_nodes)
      sync_config = Cfgsync::PcsdSettings.from_text(config.text())
      # on version conflict just go on, config will be corrected eventually
      # by displaying the cluster in the web UI
      Cfgsync::save_sync_new_version(
        sync_config, get_corosync_nodes(), $cluster_name, true
      )
      # Re-run once with the updated config; the flag stops recursion.
      return cluster_status_gui(session, cluster_name, true)
    end
  end
  return JSON.generate(status)
end
CWE-384
1
# Fetch the list of available fence agents from a node of the cluster.
#
# NOTE(review): this method references `params` without receiving it as
# an argument -- it presumably relies on Sinatra's request-scoped
# `params` helper being in scope; confirm before reusing it outside a
# request handler.
# Returns the parsed agent hash, or {} on any transport, auth, or parse
# failure.
def get_stonith_agents_avail(session)
  code, result = send_cluster_request_with_token(
    session, params[:cluster], 'get_avail_fence_agents'
  )
  return {} if 200 != code
  begin
    sa = JSON.parse(result)
    # Any of these flags means the remote node could not answer properly.
    # (Note the asymmetry: "notauthorized" is compared to the string
    # "true", the others to the boolean true -- matches what remote
    # endpoints emit.)
    if (sa["noresponse"] == true) or (sa["notauthorized"] == "true") or (sa["notoken"] == true) or (sa["pacemaker_not_running"] == true)
      return {}
    else
      return sa
    end
  rescue JSON::ParserError
    return {}
  end
end
CWE-384
1
# Report which config file classes this pcsd can synchronize, as JSON.
def get_sync_capabilities(params, request, session)
  payload = {
    'syncable_configs' => Cfgsync::get_cfg_classes_by_name().keys,
  }
  return JSON.generate(payload)
end
CWE-384
1
# Store the publishing context: configs to push, target nodes, cluster
# name, and optional per-node auth tokens.  The names of the configs
# being published are cached for later lookups.
def initialize(session, configs, nodes, cluster_name, tokens={})
  @session = session
  @configs = configs
  @nodes = nodes
  @cluster_name = cluster_name
  @additional_tokens = tokens
  @published_configs_names = @configs.collect { |cfg| cfg.class.name }
end
CWE-384
1
# Ask pacemaker (crm_node -n) for this node's name.
# Returns "" when the command fails or prints nothing.
def get_current_node_name()
  stdout, _stderr, retval = run_cmd(
    PCSAuth.getSuperuserSession, CRM_NODE, "-n"
  )
  return "" unless retval == 0 and stdout.length > 0
  stdout[0].chomp()
end
CWE-384
1
# Disable cluster autostart.  When :name is given, forward the request
# to that node; otherwise disable the local cluster (WRITE required).
def cluster_disable(params, request, session)
  if params[:name]
    code, response = send_request_with_token(
      session, params[:name], 'cluster_disable', true
    )
  else
    unless allowed_for_local_cluster(session, Permissions::WRITE)
      return 403, 'Permission denied'
    end
    if disable_cluster(session)
      return "Cluster Disabled"
    end
    return JSON.generate({"error" => "true"})
  end
end
CWE-384
1
# Wrap a resource in a master/slave clone via `pcs resource master`.
# Returns 200 on success, [400, message] on failure.
def resource_master(params, request, session)
  return 403, 'Permission denied' unless allowed_for_local_cluster(session, Permissions::WRITE)
  resource_id = params[:resource_id]
  return [400, 'resource_id has to be specified.'] unless resource_id
  _, stderr, retval = run_cmd(session, PCS, 'resource', 'master', resource_id)
  unless retval == 0
    return [400, "Unable to create master/slave resource from '#{resource_id}': #{stderr.join('')}"]
  end
  return 200
end
CWE-384
1
# Determine the installed cman version as [major, minor, micro] integers.
# Returns nil when cman_tool is unavailable, fails, or its output does
# not contain an x.y.z version string.
def get_cman_version()
  begin
    stdout, _stderr, retval = run_cmd(
      PCSAuth.getSuperuserSession, CMAN_TOOL, "-V"
    )
  rescue
    # cman_tool missing entirely; retval stays nil and we fall through.
    stdout = []
  end
  if retval == 0
    version = /(\d+)\.(\d+)\.(\d+)/.match(stdout.join())
    return version[1..3].map { |part| part.to_i } if version
  end
  return nil
end
CWE-384
1
# Request guard: authenticate the current request.
#
# - /remote/* and /run_pcs must present a valid token (via cookies);
#   otherwise 401.
# - Everything else is session based.  Non-GUI paths additionally require
#   the X-Requested-With: XMLHttpRequest header as a CSRF mitigation.
#   Unauthenticated GUI requests are redirected to /login with the
#   original path saved for post-login redirect.
def protected!
  gui_request = (                     # these are URLs for web pages
    request.path == '/' or
    request.path == '/manage' or
    request.path == '/permissions' or
    request.path.match('/managec/.+/main')
  )
  if request.path.start_with?('/remote/') or request.path == '/run_pcs'
    # Token-authenticated API surface.
    unless PCSAuth.loginByToken(session, cookies)
      halt [401, '{"notauthorized":"true"}']
    end
  else #/managec/* /manage/* /permissions
    if !gui_request and request.env['HTTP_X_REQUESTED_WITH'] != 'XMLHttpRequest' then
      # Accept non GUI requests only with header
      # "X_REQUESTED_WITH: XMLHttpRequest". (check if they are send via AJAX).
      # This prevents CSRF attack.
      halt [401, '{"notauthorized":"true"}']
    elsif not PCSAuth.isLoggedIn(session)
      if gui_request
        # Remember where the user wanted to go, then send them to login.
        session[:pre_login_path] = request.path
        redirect '/login'
      else
        halt [401, '{"notauthorized":"true"}']
      end
    end
  end
end
CWE-384
1
# Set one meta attribute (key=value) on a resource.
# Returns the pcs exit code (0 on success).
def add_meta_attr(session, resource, key, value)
  _out, _err, retval = run_cmd(
    session, PCS, "resource", "meta", resource, "#{key}=#{value}"
  )
  return retval
end
CWE-384
1
# Create an ordering constraint between two resources.
# Returns [exit_code, stderr_text].
#
# NOTE: when score is "" it is passed to pcs as an empty argument
# (original behavior, preserved).
def add_order_constraint(
  session, resourceA, resourceB, actionA, actionB, score, symmetrical=true,
  force=false, autocorrect=true
)
  score = "score=" + score unless score == ""
  command = [
    PCS, "constraint", "order", actionA, resourceA, "then", actionB,
    resourceB, score, symmetrical ? "symmetrical" : "nonsymmetrical"
  ]
  command << '--force' if force
  command << '--autocorrect' if autocorrect
  _stdout, stderr, retval = run_cmd(session, *command)
  return retval, stderr.join(' ')
end
CWE-384
1
# Set (or clear, with an empty value) a utilization attribute on a
# resource via `pcs resource utilization`.
def set_resource_utilization(params, reqest, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  unless params[:resource_id] and params[:name]
    return 400, 'resource_id and name are required'
  end
  res_id = params[:resource_id]
  name = params[:name]
  value = params[:value] || ''
  _, stderr, retval = run_cmd(
    session, PCS, 'resource', 'utilization', res_id, "#{name}=#{value}"
  )
  return 200 if retval == 0
  return [400, "Unable to set utilization '#{name}=#{value}' for " +
    "resource '#{res_id}': #{stderr.join('')}"
  ]
end
CWE-384
1
# Add (or remove, per params["remove"]) a fence level for a node.
def add_fence_level_remote(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  retval, stdout, stderr = add_fence_level(
    session, params["level"], params["devices"], params["node"], params["remove"]
  )
  return [200, "Successfully added fence level"] if retval == 0
  return [400, stderr]
end
CWE-384
1
# Return the raw CIB XML (cibadmin -Ql).  Requires READ permission.
# 400 with a JSON flag when pacemaker is not running, 500 on other
# cibadmin failures.
def get_cib(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  cib, stderr, retval = run_cmd(session, CIBADMIN, "-Ql")
  if retval == 0
    return [200, cib]
  end
  unless pacemaker_running?
    return [400, '{"pacemaker_not_running":true}']
  end
  return [500, "Unable to get CIB: " + cib.to_s + stderr.to_s]
end
CWE-384
1
# Unassign a user/group from an ACL role (auto-deleting empty targets).
# Returns "" on success, an error string otherwise.
def remove_acl_usergroup(session, role_id, usergroup_id)
  _out, stderr, retval = run_cmd(
    session, PCS, "acl", "role", "unassign", role_id.to_s,
    usergroup_id.to_s, "--autodelete"
  )
  return "" if retval == 0
  return "Error removing user / group" if stderr.empty?
  stderr.join("\n").strip
end
CWE-384
1
# Load the CIB (cibadmin -Q -l) and parse it into a REXML::Document.
# Returns nil when cibadmin fails or the XML cannot be parsed.
def get_cib_dom(session)
  stdout, _, retval = run_cmd(session, 'cibadmin', '-Q', '-l')
  return REXML::Document.new(stdout.join("\n")) if retval == 0
  nil
rescue
  $logger.error 'Failed to parse cib.'
  nil
end
CWE-384
1
# Save corosync.conf on the local node and, if that succeeds, start the
# cluster.  Requires superuser permission.
#
# Bug fix: set_corosync_conf() returns a [status, message] pair (200,
# 400 or 403).  Any such array is truthy in Ruby, so the original
# `if set_corosync_conf(...)` condition always held and cluster_start
# was invoked even when saving corosync.conf had failed.  We now check
# the returned status code explicitly.
def create_cluster(params, request, session)
  if not allowed_for_superuser(session)
    return 403, 'Permission denied'
  end
  code, _message = set_corosync_conf(params, request, session)
  if code == 200
    cluster_start(params, request, session)
  else
    return "Failed"
  end
end
CWE-384
1
# Serve the local corosync.conf contents (READ permission required).
def get_corosync_conf_remote(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  get_corosync_conf()
end
CWE-384
1
# Create an ordering constraint over one or more resource sets.
# Each entry of resource_set_list is an array of pcs "set" arguments.
# Returns [exit_code, stderr_text].
def add_order_set_constraint(
  session, resource_set_list, force=false, autocorrect=true
)
  command = [PCS, "constraint", "order"]
  resource_set_list.each { |resource_set|
    command.push("set", *resource_set)
  }
  command << '--force' if force
  command << '--autocorrect' if autocorrect
  _stdout, stderr, retval = run_cmd(session, *command)
  return retval, stderr.join(' ')
end
CWE-384
1
# Return quorum information as plain text (READ permission required).
#
# On RHEL6 (cman) it combines `cman_tool status` and `cman_tool nodes`;
# elsewhere it runs corosync-quorumtool.  On any stderr output the
# stderr text is returned instead of the report.
def get_quorum_info(params, request, session)
  if not allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  if ISRHEL6
    stdout_status, stderr_status, retval = run_cmd(
      PCSAuth.getSuperuserSession, CMAN_TOOL, "status"
    )
    stdout_nodes, stderr_nodes, retval = run_cmd(
      PCSAuth.getSuperuserSession,
      CMAN_TOOL, "nodes", "-F", "id,type,votes,name"
    )
    if stderr_status.length > 0
      return stderr_status.join
    elsif stderr_nodes.length > 0
      return stderr_nodes.join
    else
      return stdout_status.join + "\n---Votes---\n" + stdout_nodes.join
    end
  else
    stdout, stderr, retval = run_cmd(
      PCSAuth.getSuperuserSession, COROSYNC_QUORUMTOOL, "-p", "-s"
    )
    # retval is 0 on success if node is not in partition with quorum
    # retval is 1 on error OR on success if node has quorum
    if stderr.length > 0
      return stderr.join
    else
      return stdout.join
    end
  end
end
CWE-384
1
# List node names known to corosync, as reported by
# `pcs status nodes corosync`.
#
# Robustness fix: the original indexed stdout[1] and stdout[2] blindly
# and raised NoMethodError (nil.sub) when the command produced fewer
# lines than expected; such output now yields [] like other failures.
def get_corosync_nodes()
  stdout, _stderr, retval = run_cmd(
    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "corosync"
  )
  return [] if retval != 0
  stdout.each { |line| line.strip! }
  # Expected shape: header line, then "Online: ..." and "Offline: ...".
  return [] if stdout.length < 3
  corosync_online = stdout[1].sub(/^.*Online:/, "").strip
  corosync_offline = stdout[2].sub(/^.*Offline:/, "").strip
  return corosync_online.split(/ /) + corosync_offline.split(/ /)
end
CWE-384
1
# Decide whether a second (ring1) address is required for cluster setup,
# based on corosync's runtime configuration (totem.transport and
# totem.rrp_mode).
def need_ring1_address?()
  cmap_out, _errout, retval = run_cmd(PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL)
  return false if retval != 0
  udpu_transport = false
  rrp = false
  cmap_out.each { |line|
    # support both corosync-objctl and corosync-cmapctl format
    if /^\s*totem\.transport(\s+.*)?=\s*udpu$/.match(line)
      udpu_transport = true
    elsif /^\s*totem\.rrp_mode(\s+.*)?=\s*(passive|active)$/.match(line)
      rrp = true
    end
  }
  # on rhel6 ring1 address is required regardless of transport
  # it has to be added to cluster.conf in order to set up ring1
  # in corosync by cman
  return (ISRHEL6 and rrp) || (rrp and udpu_transport)
end
CWE-384
1
# Parse `pcs status nodes both` into per-daemon node status lists.
# Returns a hash with 'corosync_online', 'corosync_offline',
# 'pacemaker_online', 'pacemaker_offline' and 'pacemaker_standby'
# arrays of node names.
def get_nodes_status()
  corosync_online = []
  corosync_offline = []
  pacemaker_online = []
  pacemaker_offline = []
  pacemaker_standby = []
  in_pacemaker = false
  stdout, stderr, retval = run_cmd(
    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "both"
  )
  stdout.each {|l|
    l = l.chomp
    # Output lists corosync nodes first, then a "Pacemaker Nodes:" section.
    if l.start_with?("Pacemaker Nodes:")
      in_pacemaker = true
    end
    # Remote nodes are not tracked by this function.
    if l.start_with?("Pacemaker Remote Nodes:")
      break
    end
    # Section headers (lines ending with ':') carry no node names.
    if l.end_with?(":")
      next
    end
    title,nodes = l.split(/: /,2)
    if nodes == nil
      next
    end
    if title == " Online"
      in_pacemaker ? pacemaker_online.concat(nodes.split(/ /)) : corosync_online.concat(nodes.split(/ /))
    elsif title == " Standby"
      if in_pacemaker
        pacemaker_standby.concat(nodes.split(/ /))
      end
    elsif title == " Maintenance"
      # Maintenance nodes are reported as online in pacemaker.
      if in_pacemaker
        pacemaker_online.concat(nodes.split(/ /))
      end
    else
      in_pacemaker ? pacemaker_offline.concat(nodes.split(/ /)) : corosync_offline.concat(nodes.split(/ /))
    end
  }
  return {
    'corosync_online' => corosync_online,
    'corosync_offline' => corosync_offline,
    'pacemaker_online' => pacemaker_online,
    'pacemaker_offline' => pacemaker_offline,
    'pacemaker_standby' => pacemaker_standby,
  }
end
CWE-384
1
# Run an external command with pcsd-specific environment and logging.
#
# options may contain 'stdin' => String to feed to the child.
# CIB_user / CIB_user_groups environment variables are set from the
# session so pacemaker tools apply the requesting user's ACLs.
# Returns [stdout_lines, stderr_lines, exit_status].
def run_cmd_options(session, options, *args)
  $logger.info("Running: " + args.join(" "))
  start = Time.now
  out = ""
  errout = ""
  # Runs in the parent around the exec'd child: feeds stdin (if any),
  # then drains stdout/stderr.
  proc_block = proc { |pid, stdin, stdout, stderr|
    if options and options.key?('stdin')
      stdin.puts(options['stdin'])
      stdin.close()
    end
    out = stdout.readlines()
    errout = stderr.readlines()
    duration = Time.now - start
    $logger.debug(out)
    $logger.debug(errout)
    $logger.debug("Duration: " + duration.to_s + "s")
  }
  cib_user = session[:username]
  # when running 'id -Gn' to get the groups they are not defined yet
  cib_groups = (session[:usergroups] || []).join(' ')
  $logger.info("CIB USER: #{cib_user}, groups: #{cib_groups}")
  # Open4.popen4 reimplementation which sets ENV in a child process prior
  # to running an external process by exec
  status = Open4::do_popen(proc_block, :init) { |ps_read, ps_write|
    # Keep the coordination pipes out of the exec'd process.
    ps_read.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
    ps_write.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
    ENV['CIB_user'] = cib_user
    ENV['CIB_user_groups'] = cib_groups
    exec(*args)
  }
  retval = status.exitstatus
  $logger.info("Return Value: " + retval.to_s)
  return out, errout, retval
end
CWE-384
1
# Send an HTTP request to a node, authenticating with that node's saved
# token (or one supplied in additional_tokens).
# Returns [code, body]; [400, '{"notoken":true}'] when no token exists.
def send_request_with_token(session, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, additional_tokens={})
  token = additional_tokens[node] || get_node_token(node)
  $logger.info "SRWT Node: #{node} Request: #{request}"
  unless token
    $logger.error "Unable to connect to node #{node}, no token available"
    return 400, '{"notoken":true}'
  end
  send_request(
    session, node, request, post, data, remote, raw_data, timeout,
    {'token' => token}
  )
end
CWE-384
1
# Authenticate a request by its token cookie.
#
# On success fills session[:username] and session[:usergroups] and
# returns true; returns false for a missing/invalid token.  When the
# token belongs to SUPERUSER, the CIB_user / CIB_user_groups cookies may
# override the effective user, i.e. the superuser can act on behalf of
# another user.
#
# NOTE(review): the session is (re)populated from the token on each call
# without regenerating the session id -- relevant to session fixation
# (CWE-384); confirm session lifecycle handling at the framework level.
def self.loginByToken(session, cookies)
  if username = validToken(cookies["token"])
    if SUPERUSER == username
      if cookies['CIB_user'] and cookies['CIB_user'].strip != ''
        # Superuser impersonating another user via cookies.
        session[:username] = cookies['CIB_user']
        if cookies['CIB_user_groups'] and cookies['CIB_user_groups'].strip != ''
          session[:usergroups] = cookieUserDecode(
            cookies['CIB_user_groups']
          ).split(nil)
        else
          session[:usergroups] = []
        end
      else
        session[:username] = SUPERUSER
        session[:usergroups] = []
      end
      return true
    else
      # Regular user: resolve group membership from the system.
      session[:username] = username
      success, groups = getUsersGroups(username)
      session[:usergroups] = success ? groups : []
      return true
    end
  end
  return false
end
CWE-384
1
# Disable (stop) a resource via `pcs resource disable`.
# Returns a JSON success flag, or a JSON error with command output.
def resource_stop(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  stdout, stderr, retval = run_cmd(
    session, PCS, "resource", "disable", params[:resource]
  )
  if retval != 0
    # NOTE: "stderror" key (sic) kept for client compatibility.
    return JSON.generate({"error" => "true", "stdout" => stdout, "stderror" => stderr})
  end
  JSON.generate({"success" => "true"})
end
CWE-384
1
# Remove a single constraint identified by params[:constraint_id].
def remove_constraint_remote(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  constraint_id = params[:constraint_id]
  return [400, "Bad Constraint Options"] unless constraint_id
  if remove_constraint(session, constraint_id) == 0
    return "Constraint #{constraint_id} removed"
  end
  return [400, "Error removing constraint: #{constraint_id}"]
end
CWE-384
1
# Save pcsd's SSL certificate/key pair and/or the cookie-signing secret.
#
# Requires FULL permission.  Certificate and key must be provided
# together and must pass verify_cert_key_pair.  Files are written with
# mode 0700; on a partial write both files are removed so pcsd can
# regenerate them on next start.  Returns [status, message].
def set_certs(params, request, session)
  if not allowed_for_local_cluster(session, Permissions::FULL)
    return 403, 'Permission denied'
  end
  ssl_cert = (params['ssl_cert'] || '').strip
  ssl_key = (params['ssl_key'] || '').strip
  # Cert and key only make sense as a pair.
  if ssl_cert.empty? and !ssl_key.empty?
    return [400, 'cannot save ssl certificate without ssl key']
  end
  if !ssl_cert.empty? and ssl_key.empty?
    return [400, 'cannot save ssl key without ssl certificate']
  end
  if !ssl_cert.empty? and !ssl_key.empty?
    ssl_errors = verify_cert_key_pair(ssl_cert, ssl_key)
    if ssl_errors and !ssl_errors.empty?
      return [400, ssl_errors.join]
    end
    begin
      write_file_lock(CRT_FILE, 0700, ssl_cert)
      write_file_lock(KEY_FILE, 0700, ssl_key)
    rescue => e
      # clean the files if we ended in the middle
      # the files will be regenerated on next pcsd start
      FileUtils.rm(CRT_FILE, {:force => true})
      FileUtils.rm(KEY_FILE, {:force => true})
      return [400, "cannot save ssl files: #{e}"]
    end
  end
  if params['cookie_secret']
    cookie_secret = params['cookie_secret'].strip
    if !cookie_secret.empty?
      begin
        write_file_lock(COOKIE_FILE, 0700, cookie_secret)
      rescue => e
        return [400, "cannot save cookie secret: #{e}"]
      end
    end
  end
  return [200, 'success']
end
CWE-384
1
# Dispatch a submitted wizard form to the matching PCSDWizard handler
# and render its response.
def wizard_submit(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  wizard = PCSDWizard.getWizard(params["wizard"])
  if wizard.nil?
    return "Error finding Wizard - #{params["wizard"]}"
  end
  return erb wizard.process_responses(params)
end
CWE-384
1
# Convenience wrapper around run_cmd_options with no extra options.
def run_cmd(session, *args)
  run_cmd_options(session, {}, *args)
end
CWE-384
1
# Collect per-node instance attributes from the CIB.
# Returns {node_name => [{:id, :key, :value}, ...]} with each node's
# attributes sorted by key; {} when the CIB cannot be loaded.
def get_node_attributes(session, cib_dom=nil)
  unless cib_dom
    cib_dom = get_cib_dom(session)
    return {} unless cib_dom
  end
  attrs_by_node = {}
  cib_dom.elements.each(
    '/cib/configuration/nodes/node/instance_attributes/nvpair'
  ) { |nvpair|
    # nvpair -> instance_attributes -> node carries the uname.
    node_name = nvpair.parent.parent.attributes['uname']
    (attrs_by_node[node_name] ||= []) << {
      :id => nvpair.attributes['id'],
      :key => nvpair.attributes['name'],
      :value => nvpair.attributes['value']
    }
  }
  attrs_by_node.each_value { |attrs| attrs.sort_by! { |attr| attr[:key] } }
  return attrs_by_node
end
CWE-384
1
# Accept a config-sync push from another node.
#
# Expects params['configs'] to be JSON with cluster_name, force and the
# configs themselves.  Each known config is saved when it is newer than
# the local copy, when force is set, or when there is no local copy
# (e.g. this node is being added to the cluster).  Runs under
# $semaphore_cfgsync to serialize with other sync operations.
# Returns JSON: per-config accepted/rejected/error/not_supported.
def set_configs(params, request, session)
  if not allowed_for_local_cluster(session, Permissions::FULL)
    return 403, 'Permission denied'
  end
  return JSON.generate({'status' => 'bad_json'}) if not params['configs']
  begin
    configs_json = JSON.parse(params['configs'])
  rescue JSON::ParserError
    return JSON.generate({'status' => 'bad_json'})
  end
  # Reject pushes intended for a different cluster.
  has_cluster = !($cluster_name == nil or $cluster_name.empty?)
  if has_cluster and $cluster_name != configs_json['cluster_name']
    return JSON.generate({'status' => 'wrong_cluster_name'})
  end
  $semaphore_cfgsync.synchronize {
    force = configs_json['force']
    remote_configs, unknown_cfg_names = Cfgsync::sync_msg_to_configs(configs_json)
    local_configs = Cfgsync::get_configs_local
    result = {}
    unknown_cfg_names.each { |name| result[name] = 'not_supported' }
    remote_configs.each { |name, remote_cfg|
      begin
        # Save a remote config if it is a newer version than local. If the config
        # is not present on a local node, the node is beeing added to a cluster,
        # so we need to save the config as well.
        if force or not local_configs.key?(name) or remote_cfg > local_configs[name]
          local_configs[name].class.backup() if local_configs.key?(name)
          remote_cfg.save()
          result[name] = 'accepted'
        elsif remote_cfg == local_configs[name]
          # Someone wants this node to have a config that it already has.
          # So the desired state is met and the result is a success then.
          result[name] = 'accepted'
        else
          result[name] = 'rejected'
        end
      rescue => e
        $logger.error("Error saving config '#{name}': #{e}")
        result[name] = 'error'
      end
    }
    return JSON.generate({'status' => 'ok', 'result' => result})
  }
end
CWE-384
1
# Return the installed fence agents as JSON (READ permission required).
def get_avail_fence_agents(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  JSON.generate(getFenceAgents(session))
end
CWE-384
1
# Add a node to the cluster and record it in pcs_settings.conf.
#
# all=true runs `pcs cluster node add` (a full add; with auto_start the
# node is also started and enabled).  all=false only registers the node
# locally via `pcs cluster localnode add`.  The updated node list is then
# synced to the other cluster nodes.
# Returns [pcs_exit_code, combined stdout+stderr text].
def add_node(session, new_nodename, all=false, auto_start=true)
  if all
    command = [PCS, "cluster", "node", "add", new_nodename]
    if auto_start
      command << '--start'
      command << '--enable'
    end
    out, stderror, retval = run_cmd(session, *command)
  else
    out, stderror, retval = run_cmd(
      session, PCS, "cluster", "localnode", "add", new_nodename
    )
  end
  $logger.info("Adding #{new_nodename} to pcs_settings.conf")
  corosync_nodes = get_corosync_nodes()
  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
  pcs_config.update_cluster($cluster_name, corosync_nodes)
  sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
  # on version conflict just go on, config will be corrected eventually
  # by displaying the cluster in the web UI
  Cfgsync::save_sync_new_version(
    sync_config, corosync_nodes, $cluster_name, true
  )
  return retval, out.join("\n") + stderror.join("\n")
end
CWE-384
1
# List available resource agents via `pcs resource list --nodesc`.
# Returns {agent_name => ResourceAgent}; {} when pcs fails.
def getResourceAgents(session)
  resource_agent_list = {}
  stdout, stderr, retval = run_cmd(session, PCS, "resource", "list", "--nodesc")
  if retval != 0
    # Fix: the log message was missing its closing quote.
    $logger.error("Error running 'pcs resource list --nodesc'")
    $logger.error(stdout + stderr)
    return {}
  end
  stdout.each { |line|
    ra = ResourceAgent.new
    ra.name = line.chomp
    resource_agent_list[ra.name] = ra
  }
  return resource_agent_list
end
CWE-384
1
# Fetch the JSON definition of settable cluster properties from pcs.
def get_cluster_properties_definition(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  stdout, _, retval = run_cmd(
    session, PCS, 'property', 'get_cluster_properties_definition'
  )
  return [400, '{}'] unless retval == 0
  return [200, stdout]
end
CWE-384
1
# Replace the local corosync.conf with the posted content (after taking
# a backup).  Requires FULL permission.
# Returns [200, "Succeeded"] or [400, "Failed"] for an empty body.
def set_corosync_conf(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::FULL)
    return 403, 'Permission denied'
  end
  conf_text = params[:corosync_conf]
  if conf_text.nil? or conf_text.strip == ""
    $logger.info "Invalid corosync.conf file"
    return 400, "Failed"
  end
  Cfgsync::CorosyncConf.backup()
  Cfgsync::CorosyncConf.from_text(conf_text).save()
  return 200, "Succeeded"
end
CWE-384
1
# Update cluster properties from the settings form.
#
# Computes which properties actually changed (added, removed, or
# updated) and applies only those via `pcs property set`.  Changing
# enable-acl requires GRANT permission; all other properties require
# WRITE.  Returns [200, "Update Successful"] or [400, pcs error text].
def update_cluster_settings(params, request, session)
  properties = params['config']
  to_update = []
  current = getAllSettings(session)

  # We need to be able to set cluster properties also from older version GUI.
  # This code handles proper processing of checkboxes.
  # === backward compatibility layer start ===
  params['hidden'].each { |prop, val|
    next if prop == 'hidden_input'
    unless properties.include?(prop)
      properties[prop] = val
      to_update << prop
    end
  }
  # === backward compatibility layer end ===

  properties.each { |prop, val|
    val.strip!
    if not current.include?(prop) and val != '' # add
      to_update << prop
    elsif current.include?(prop) and val == '' # remove
      to_update << prop
    elsif current.include?(prop) and current[prop] != val # update
      to_update << prop
    end
  }

  # enable-acl is permission-sensitive: it needs GRANT, not just WRITE.
  if to_update.count { |x| x.downcase == 'enable-acl' } > 0
    if not allowed_for_local_cluster(session, Permissions::GRANT)
      return 403, 'Permission denied'
    end
  end
  if to_update.count { |x| x.downcase != 'enable-acl' } > 0
    if not allowed_for_local_cluster(session, Permissions::WRITE)
      return 403, 'Permission denied'
    end
  end

  if to_update.empty?
    $logger.info('No properties to update')
  else
    cmd_args = []
    to_update.each { |prop|
      cmd_args << "#{prop.downcase}=#{properties[prop]}"
    }
    stdout, stderr, retval = run_cmd(session, PCS, 'property', 'set', *cmd_args)
    if retval != 0
      return [400, stderr.join('').gsub(', (use --force to override)', '')]
    end
  end
  return [200, "Update Successful"]
end
CWE-384
1
# Move a resource into a group, or -- when the requested group_id is
# empty -- remove it from its old group (params[:old_group_id]).
def resource_change_group(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  resource_id = params[:resource_id]
  group_id = params[:group_id]
  if resource_id.nil? or group_id.nil?
    return [400, 'resource_id and group_id have to be specified.']
  end
  if group_id.empty?
    old_group = params[:old_group_id]
    if old_group
      _, stderr, retval = run_cmd(
        session, PCS, 'resource', 'group', 'remove', old_group, resource_id
      )
      if retval != 0
        return [400, "Unable to remove resource '#{resource_id}' " +
          "from group '#{old_group}': #{stderr.join('')}"
        ]
      end
    end
    return 200
  end
  _, stderr, retval = run_cmd(
    session, PCS, 'resource', 'group', 'add', group_id, resource_id
  )
  if retval != 0
    return [400, "Unable to add resource '#{resource_id}' to " +
      "group '#{group_id}': #{stderr.join('')}"
    ]
  end
  return 200
end
CWE-384
1
# Report a resource's state ("Failed"/"Inactive"/"Running") and the node
# it runs on, as JSON.
def resource_status(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::READ)
    return 403, 'Permission denied'
  end
  resource_id = params[:resource]
  @resources, @groups = getResourcesGroups(session)
  location = ""
  res_status = ""
  @resources.each { |resource|
    next unless resource.id == resource_id
    if resource.failed
      res_status = "Failed"
    elsif !resource.active
      res_status = "Inactive"
    else
      res_status = "Running"
    end
    # Stop at the first match that reports a hosting node (original
    # behavior: without nodes the scan continues).
    if resource.nodes.length != 0
      location = resource.nodes[0].name
      break
    end
  }
  return JSON.generate({"location" => location, "status" => res_status})
end
CWE-384
1
def add_constraint_remote(params, request, session) if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end case params["c_type"] when "loc" retval, error = add_location_constraint( session, params["res_id"], params["node_id"], params["score"], params["force"], !params['disable_autocorrect'] ) when "ord" resA = params["res_id"] resB = params["target_res_id"] actionA = params['res_action'] actionB = params['target_action'] if params["order"] == "before" resA, resB = resB, resA actionA, actionB = actionB, actionA end retval, error = add_order_constraint( session, resA, resB, actionA, actionB, params["score"], true, params["force"], !params['disable_autocorrect'] ) when "col" resA = params["res_id"] resB = params["target_res_id"] score = params["score"] if params["colocation_type"] == "apart" if score.length > 0 and score[0] != "-" score = "-" + score elsif score == "" score = "-INFINITY" end end retval, error = add_colocation_constraint( session, resA, resB, score, params["force"], !params['disable_autocorrect'] ) else return [400, "Unknown constraint type: #{params['c_type']}"] end
CWE-384
1
# Remove an ACL permission, or unassign a user/group from a role,
# depending on params["item"].  Requires GRANT permission.
def remove_acl_remote(params, request, session)
  unless allowed_for_local_cluster(session, Permissions::GRANT)
    return 403, 'Permission denied'
  end
  case params["item"]
  when "permission"
    retval = remove_acl_permission(session, params["acl_perm_id"])
  when "usergroup"
    retval = remove_acl_usergroup(
      session, params["role_id"], params["usergroup_id"]
    )
  else
    retval = "Error: Unknown removal request"
  end
  return [200, "Successfully removed permission from role"] if retval == ""
  return [400, retval]
end
CWE-384
1
# Bump a config's version and distribute it across the cluster.
#
# Standalone host (no cluster name): just bump the version and save.
# In a cluster: publish to all nodes; when another node already holds a
# newer version (a conflict), optionally fetch and save the winning
# version locally (fetch_on_conflict).
# Returns [success_bool, node_responses].
def self.save_sync_new_version(config, nodes, cluster_name, fetch_on_conflict, tokens={})
  if not cluster_name or cluster_name.empty?
    # we run on a standalone host, no config syncing
    config.version += 1
    config.save()
    return true, {}
  else
    # we run in a cluster so we need to sync the config
    publisher = ConfigPublisher.new(
      PCSAuth.getSuperuserSession(), [config], nodes, cluster_name, tokens
    )
    old_configs, node_responses = publisher.publish()
    # Our config was outdated on some node -> conflict.
    if old_configs.include?(config.class.name)
      if fetch_on_conflict
        fetcher = ConfigFetcher.new(
          PCSAuth.getSuperuserSession(), [config.class], nodes, cluster_name
        )
        cfgs_to_save, _ = fetcher.fetch()
        cfgs_to_save.each { |cfg_to_save|
          cfg_to_save.save() if cfg_to_save.class == config.class
        }
      end
      return false, node_responses
    end
    return true, node_responses
  end
end
CWE-384
1
# Ask each node (in parallel) to restart its pcsd and collect results.
#
# Returns {'status' => 'ok'|'error', 'text' => summary,
#          'node_status' => {node => {'status', 'text'}}}.
def pcsd_restart_nodes(session, nodes)
  node_response = {}
  threads = []
  # Fire all restart requests concurrently.
  nodes.each { |node|
    threads << Thread.new {
      code, response = send_request_with_token(
        session, node, '/pcsd_restart', true
      )
      node_response[node] = [code, response]
    }
  }
  threads.each { |t| t.join }

  node_error = []
  node_status = {}
  node_response.each { |node, response|
    if response[0] == 200
      node_status[node] = {
        'status' => 'ok',
        'text' => 'Success',
      }
    else
      # Map transport/auth failures to a user-friendly message.
      text = response[1]
      if response[0] == 401
        text = "Unable to authenticate, try running 'pcs cluster auth'"
      elsif response[0] == 400
        begin
          parsed_response = JSON.parse(response[1], {:symbolize_names => true})
          if parsed_response[:noresponse]
            text = "Unable to connect"
          elsif parsed_response[:notoken] or parsed_response[:notauthorized]
            text = "Unable to authenticate, try running 'pcs cluster auth'"
          end
        rescue JSON::ParserError
        end
      end
      node_status[node] = {
        'status' => 'error',
        'text' => text
      }
      node_error << node
    end
  }
  return {
    'status' => node_error.empty?() ? 'ok' : 'error',
    'text' => node_error.empty?() ? 'Success' : \
      "Unable to restart pcsd on nodes: #{node_error.join(', ')}",
    'node_status' => node_status,
  }
end
CWE-384
1
def add_acl_remote(params, request, session) if not allowed_for_local_cluster(session, Permissions::GRANT) return 403, 'Permission denied' end if params["item"] == "permission" retval = add_acl_permission( session, params["role_id"], params["type"], params["xpath_id"], params["query_id"] ) elsif (params["item"] == "user") or (params["item"] == "group") retval = add_acl_usergroup( session, params["role_id"], params["item"], params["usergroup"] ) else retval = "Error: Unknown adding request" end if retval == "" return [200, "Successfully added permission to role"] else return [ 400, retval.include?("cib_replace failed") ? "Error adding permission" : retval ] end end
CWE-384
1
def node_restart(params, request, session) if params[:name] code, response = send_request_with_token( session, params[:name], 'node_restart', true ) else if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end $logger.info "Restarting Node" output = `/sbin/reboot` $logger.debug output return output end end
CWE-384
1
def node_standby(params, request, session) if params[:name] code, response = send_request_with_token( session, params[:name], 'node_standby', true, {"node"=>params[:name]} ) # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd else if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end $logger.info "Standby Node" stdout, stderr, retval = run_cmd(session, PCS, "cluster", "standby") return stdout end end
CWE-384
1
def check_gui_status_of_nodes(session, nodes, check_mutuality=false, timeout=10) options = {} options[:check_auth_only] = '' if not check_mutuality threads = [] not_authorized_nodes = [] online_nodes = [] offline_nodes = [] nodes = nodes.uniq.sort nodes.each { |node| threads << Thread.new { code, response = send_request_with_token( session, node, 'check_auth', false, options, true, nil, timeout ) if code == 200 if check_mutuality begin parsed_response = JSON.parse(response) if parsed_response['node_list'] and parsed_response['node_list'].uniq.sort == nodes online_nodes << node else not_authorized_nodes << node end rescue not_authorized_nodes << node end else online_nodes << node end else begin parsed_response = JSON.parse(response) if parsed_response['notauthorized'] or parsed_response['notoken'] not_authorized_nodes << node else offline_nodes << node end rescue JSON::ParserError end end } } threads.each { |t| t.join } return online_nodes, offline_nodes, not_authorized_nodes end
CWE-384
1
def cluster_enable(params, request, session) if params[:name] code, response = send_request_with_token( session, params[:name], 'cluster_enable', true ) else if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end success = enable_cluster(session) if not success return JSON.generate({"error" => "true"}) end return "Cluster Enabled" end end
CWE-384
1
def resource_metadata(params, request, session) if not allowed_for_local_cluster(session, Permissions::READ) return 403, 'Permission denied' end return 200 if not params[:resourcename] or params[:resourcename] == "" resource_name = params[:resourcename][params[:resourcename].rindex(':')+1..-1] class_provider = params[:resourcename][0,params[:resourcename].rindex(':')] @resource = ResourceAgent.new(params[:resourcename]) if class_provider == "ocf:heartbeat" @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name) elsif class_provider == "ocf:pacemaker" @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name) elsif class_provider == 'nagios' @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml') end @new_resource = params[:new] @resources, @groups = getResourcesGroups(session) erb :resourceagentform end
CWE-384
1
def testLoginByToken users = [] users << {"username" => "user1", "token" => "token1"} users << {"username" => "user2", "token" => "token2"} users << {"username" => SUPERUSER, "token" => "tokenS"} password_file = File.open($user_pass_file, File::RDWR|File::CREAT) password_file.truncate(0) password_file.rewind password_file.write(JSON.pretty_generate(users)) password_file.close() session = {} cookies = {} result = PCSAuth.loginByToken(session, cookies) assert_equal(false, result) assert_equal({}, session) session = {} cookies = {'token' => 'tokenX'} result = PCSAuth.loginByToken(session, cookies) assert_equal(false, result) assert_equal({}, session) session = {} cookies = {'token' => 'token1'} result = PCSAuth.loginByToken(session, cookies) assert_equal(true, result) assert_equal( {:username => 'user1', :usergroups => ['group1', 'haclient']}, session ) session = {} cookies = { 'token' => 'token1', 'CIB_user' => 'userX', 'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX') } result = PCSAuth.loginByToken(session, cookies) assert_equal(true, result) assert_equal( {:username => 'user1', :usergroups => ['group1', 'haclient']}, session ) session = {} cookies = {'token' => 'tokenS'} result = PCSAuth.loginByToken(session, cookies) assert_equal(true, result) assert_equal( {:username => SUPERUSER, :usergroups => []}, session ) session = {} cookies = { 'token' => 'tokenS', 'CIB_user' => 'userX', 'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX') } result = PCSAuth.loginByToken(session, cookies) assert_equal(true, result) assert_equal( {:username => 'userX', :usergroups => ['groupX']}, session ) end
CWE-384
1
def remove_resource(params, request, session) if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end force = params['force'] no_error_if_not_exists = params.include?('no_error_if_not_exists') errors = "" params.each { |k,v| if k.index("resid-") == 0 resid = k.gsub('resid-', '') command = [PCS, 'resource', 'delete', resid] command << '--force' if force out, errout, retval = run_cmd(session, *command) if retval != 0 unless out.index(" does not exist.") != -1 and no_error_if_not_exists errors += errout.join(' ').strip + "\n" end end end } errors.strip! if errors == "" return 200 else $logger.info("Remove resource errors:\n"+errors) return [400, errors] end end
CWE-384
1
def remove_acl_roles_remote(params, request, session) if not allowed_for_local_cluster(session, Permissions::GRANT) return 403, 'Permission denied' end errors = "" params.each { |name, value| if name.index("role-") == 0 out, errout, retval = run_cmd( session, PCS, "acl", "role", "delete", value.to_s, "--autodelete" ) if retval != 0 errors += "Unable to remove role #{value}" unless errout.include?("cib_replace failure") errors += ": #{errout.join(" ").strip()}" end errors += "\n" $logger.info errors end end } if errors == "" return [200, "Successfully removed ACL roles"] else return [400, errors] end end
CWE-384
1
def get_tokens(params, request, session) # pcsd runs as root thus always returns hacluster's tokens if not allowed_for_local_cluster(session, Permissions::FULL) return 403, 'Permission denied' end return [200, JSON.generate(read_tokens)] end
CWE-384
1
def self.save_sync_new_tokens(config, new_tokens, nodes, cluster_name) with_new_tokens = PCSTokens.new(config.text) with_new_tokens.tokens.update(new_tokens) config_new = PcsdTokens.from_text(with_new_tokens.text) if not cluster_name or cluster_name.empty? # we run on a standalone host, no config syncing config_new.version += 1 config_new.save() return true, {} end # we run in a cluster so we need to sync the config publisher = ConfigPublisher.new( PCSAuth.getSuperuserSession(), [config_new], nodes, cluster_name, new_tokens ) old_configs, node_responses = publisher.publish() if not old_configs.include?(config_new.class.name) # no node had newer tokens file, we are ok, everything done return true, node_responses end # get tokens from all nodes and merge them fetcher = ConfigFetcher.new( PCSAuth.getSuperuserSession(), [config_new.class], nodes, cluster_name ) fetched_tokens = fetcher.fetch_all()[config_new.class.name] config_new = Cfgsync::merge_tokens_files(config, fetched_tokens, new_tokens) # and try to publish again return Cfgsync::save_sync_new_version( config_new, nodes, cluster_name, true, new_tokens ) end
CWE-384
1
def set_sync_options(params, request, session) if not allowed_for_local_cluster(session, Permissions::FULL) return 403, 'Permission denied' end options = [ 'sync_thread_pause', 'sync_thread_resume', 'sync_thread_disable', 'sync_thread_enable', ] if params.keys.count { |key| options.include?(key) } != 1 return [400, 'Exactly one option has to be specified'] end if params['sync_thread_disable'] if Cfgsync::ConfigSyncControl.sync_thread_disable($semaphore_cfgsync) return 'sync thread disabled' else return [400, 'sync thread disable error'] end end if params['sync_thread_enable'] if Cfgsync::ConfigSyncControl.sync_thread_enable() return 'sync thread enabled' else return [400, 'sync thread enable error'] end end if params['sync_thread_resume'] if Cfgsync::ConfigSyncControl.sync_thread_resume() return 'sync thread resumed' else return [400, 'sync thread resume error'] end end if params['sync_thread_pause'] if Cfgsync::ConfigSyncControl.sync_thread_pause( $semaphore_cfgsync, params['sync_thread_pause'] ) return 'sync thread paused' else return [400, 'sync thread pause error'] end end return [400, 'Exactly one option has to be specified'] end
CWE-384
1
def protected! gui_request = ( # these are URLs for web pages request.path == '/' or request.path == '/manage' or request.path == '/permissions' or request.path.match('/managec/.+/main') ) if request.path.start_with?('/remote/') or request.path == '/run_pcs' @auth_user = PCSAuth.loginByToken(cookies) unless @auth_user halt [401, '{"notauthorized":"true"}'] end else #/managec/* /manage/* /permissions if !gui_request and request.env['HTTP_X_REQUESTED_WITH'] != 'XMLHttpRequest' then # Accept non GUI requests only with header # "X_REQUESTED_WITH: XMLHttpRequest". (check if they are send via AJAX). # This prevents CSRF attack. halt [401, '{"notauthorized":"true"}'] elsif not PCSAuth.isLoggedIn(session) if gui_request session[:pre_login_path] = request.path redirect '/login' else halt [401, '{"notauthorized":"true"}'] end end end end
CWE-384
1
def set_cluster_conf(params, request, session) if not allowed_for_local_cluster(session, Permissions::FULL) return 403, 'Permission denied' end if params[:cluster_conf] != nil and params[:cluster_conf].strip != "" Cfgsync::ClusterConf.backup() Cfgsync::ClusterConf.from_text(params[:cluster_conf]).save() return 200, 'Updated cluster.conf...' else $logger.info "Invalid cluster.conf file" return 400, 'Failed to update cluster.conf...' end end
CWE-384
1
def verify_cert_key_pair(cert, key) errors = [] cert_modulus = nil key_modulus = nil stdout, stderr, retval = run_cmd_options( PCSAuth.getSuperuserSession(), { 'stdin' => cert, }, '/usr/bin/openssl', 'x509', '-modulus', '-noout' ) if retval != 0 errors << "Invalid certificate: #{stderr.join}" else cert_modulus = stdout.join.strip end stdout, stderr, retval = run_cmd_options( PCSAuth.getSuperuserSession(), { 'stdin' => key, }, '/usr/bin/openssl', 'rsa', '-modulus', '-noout' ) if retval != 0 errors << "Invalid key: #{stderr.join}" else key_modulus = stdout.join.strip end if errors.empty? and cert_modulus and key_modulus if cert_modulus != key_modulus errors << 'Certificate does not match the key' end end return errors end
CWE-384
1
def add_colocation_constraint( session, resourceA, resourceB, score, force=false, autocorrect=true ) if score == "" or score == nil score = "INFINITY" end command = [ PCS, "constraint", "colocation", "add", resourceA, resourceB, score ] command << '--force' if force command << '--autocorrect' if autocorrect stdout, stderr, retval = run_cmd(session, *command) return retval, stderr.join(' ') end
CWE-384
1
def get_pacemaker_version() begin stdout, stderror, retval = run_cmd( PCSAuth.getSuperuserSession, PACEMAKERD, "-$" ) rescue stdout = [] end if retval == 0 match = /(\d+)\.(\d+)\.(\d+)/.match(stdout.join()) if match return match[1..3].collect { | x | x.to_i } end end return nil end
CWE-384
1
def protected! if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session) # If we're on /managec/<cluster_name>/main we redirect match_expr = "/managec/(.*)/(.*)" mymatch = request.path.match(match_expr) on_managec_main = false if mymatch and mymatch.length >= 3 and mymatch[2] == "main" on_managec_main = true end if request.path.start_with?('/remote') or (request.path.match(match_expr) and not on_managec_main) or '/run_pcs' == request.path or '/clusters_overview' == request.path or request.path.start_with?('/permissions_') then $logger.info "ERROR: Request without authentication" halt [401, '{"notauthorized":"true"}'] else session[:pre_login_path] = request.path redirect '/login' end end end
CWE-352
0
def remove_constraint(session, constraint_id) stdout, stderror, retval = run_cmd( session, PCS, "constraint", "remove", constraint_id ) $logger.info stdout return retval end
CWE-384
1
def get_resource_agents_avail(session) code, result = send_cluster_request_with_token( session, params[:cluster], 'get_avail_resource_agents' ) return {} if 200 != code begin ra = JSON.parse(result) if (ra["noresponse"] == true) or (ra["notauthorized"] == "true") or (ra["notoken"] == true) or (ra["pacemaker_not_running"] == true) return {} else return ra end rescue JSON::ParserError return {} end end
CWE-384
1
def node_unstandby(params, request, session) if params[:name] code, response = send_request_with_token( session, params[:name], 'node_unstandby', true, {"node"=>params[:name]} ) # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd else if not allowed_for_local_cluster(session, Permissions::WRITE) return 403, 'Permission denied' end $logger.info "Unstandby Node" stdout, stderr, retval = run_cmd(session, PCS, "cluster", "unstandby") return stdout end end
CWE-384
1
def self.getUsersGroups(username) stdout, stderr, retval = run_cmd( getSuperuserSession, "id", "-Gn", username ) if retval != 0 $logger.info( "Unable to determine groups of user '#{username}': #{stderr.join(' ').strip}" ) return [false, []] end return [true, stdout.join(' ').split(nil)] end
CWE-384
1
function cleaner() { setTimeout(() => { if (ids.length < 1) { return; } ActiveRooms.forEach((element, index) => { element.Players.forEach((element2, index2) => { if (!ids.includes(element2.Id)) { ActiveRooms[index].Players.splice(index2, 1); ActiveRooms[index].Players.forEach((element3) => { element3.socket.emit('UserLeave', JSON.stringify({ RoomId: element.Id, UserLeaved: element2.Id })); }); } }); }); ActiveRooms.forEach((element, index) => { if (element.Players.length === 0) { ActiveRooms.splice(index, 1); } }); // ids = []; }, 5000); }
CWE-384
1
export async function execRequest(routes: Routers, ctx: AppContext) { const match = findMatchingRoute(ctx.path, routes); if (!match) throw new ErrorNotFound(); const endPoint = match.route.findEndPoint(ctx.request.method as HttpMethod, match.subPath.schema); if (ctx.URL && !isValidOrigin(ctx.URL.origin, baseUrl(endPoint.type), endPoint.type)) throw new ErrorNotFound(`Invalid origin: ${ctx.URL.origin}`, 'invalidOrigin'); // This is a generic catch-all for all private end points - if we // couldn't get a valid session, we exit now. Individual end points // might have additional permission checks depending on the action. if (!match.route.isPublic(match.subPath.schema) && !ctx.joplin.owner) throw new ErrorForbidden(); return endPoint.handler(match.subPath, ctx); }
CWE-352
0
function RegisterUserName(socket, Data) { var userName = Data.UserName.split('>').join(' ').split('<').join(' ').split('/').join(' '); if (userName.length > 16) userName = userName.slice(0, 16); if (userName.toLowerCase().includes('você')) userName = '~' + userName; ActiveRooms.forEach((element, index) => { if (element.Id === Data.RoomId) { ActiveRooms[index].Players.forEach((element2, index2) => { if (element2.Name.toLowerCase() === userName.toLowerCase()) { console.log('UserName already exists'); return socket.emit('UserNameAlreadyExists'); } else if (element2.Id === Data.UserId) { ActiveRooms[index].Players[index2].Name = userName; } socket.emit('UserNameInformation', JSON.stringify({ RoomId: element.Id, UserId: element2.Id, NickName: element2.Name })); }); } }); }
CWE-384
1
triggerMassAction: function (massActionUrl, type) { const self = this.relatedListInstance; let validationResult = self.checkListRecordSelected(); if (validationResult != true) { let progressIndicatorElement = $.progressIndicator(), selectedIds = self.readSelectedIds(true), excludedIds = self.readExcludedIds(true), cvId = self.getCurrentCvId(), postData = self.getCompleteParams(); delete postData.mode; delete postData.view; postData.viewname = cvId; postData.selected_ids = selectedIds; postData.excluded_ids = excludedIds; let actionParams = { type: 'POST', url: massActionUrl, data: postData }; if (type === 'sendByForm') { app.openUrlMethodPost(massActionUrl, postData); progressIndicatorElement.progressIndicator({ mode: 'hide' }); } else { AppConnector.request(actionParams) .done(function (responseData) { progressIndicatorElement.progressIndicator({ mode: 'hide' }); if (responseData && responseData.result !== null) { if (responseData.result.notify) { Vtiger_Helper_Js.showMessage(responseData.result.notify); } if (responseData.result.reloadList) { Vtiger_Detail_Js.reloadRelatedList(); } if (responseData.result.processStop) { progressIndicatorElement.progressIndicator({ mode: 'hide' }); return false; } } }) .fail(function (error, err) { progressIndicatorElement.progressIndicator({ mode: 'hide' }); }); } } else { self.noRecordSelectedAlert(); } },
CWE-352
0