Dataset columns: text (string, lengths 213 to 7.14k) and idx (int64, 16 to 12.5k).
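Each row below pairs a flattened kernel function diff ("--- initial +++ final") with the flattened Coccinelle semantic patch (after "<sep>") that performs the same rewrite. For readability only, here is the skb->nfct rule that recurs in many of the rows, re-indented into conventional multi-line SmPL layout; this is a best-effort re-formatting of the single-line text in the rows, which remains the authoritative form, and the unused statement metavariables are kept exactly as they appear there:

@@
struct sk_buff *skb;
expression e;
statement S1, S2;
@@
(
  skb->nfct = e
|
- skb->nfct
+ skb_nfct(skb)
)

Rules of this kind are applied with Coccinelle's spatch tool (for example, spatch --sp-file rule.cocci --dir <kernel-tree>, where the rule file name and tree path are illustrative and not part of the dataset).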
--- initial +++ final @@ -1,79 +1,79 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct virtio_gpu_fbdev *vfbdev = container_of(helper, struct virtio_gpu_fbdev, helper); struct drm_device *dev = helper->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd = {}; struct virtio_gpu_object *obj; uint32_t resid, format, size; int ret; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = mode_cmd.width * 4; mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24); switch (mode_cmd.pixel_format) { #ifdef __BIG_ENDIAN case DRM_FORMAT_XRGB8888: format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; break; case DRM_FORMAT_ARGB8888: format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; break; case DRM_FORMAT_BGRX8888: format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; break; case DRM_FORMAT_BGRA8888: format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; break; case DRM_FORMAT_RGBX8888: format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM; break; case DRM_FORMAT_RGBA8888: format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM; break; case DRM_FORMAT_XBGR8888: format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM; break; case DRM_FORMAT_ABGR8888: format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM; break; #else case DRM_FORMAT_XRGB8888: format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; break; case DRM_FORMAT_ARGB8888: format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; break; case DRM_FORMAT_BGRX8888: format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; break; case DRM_FORMAT_BGRA8888: format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; break; case DRM_FORMAT_RGBX8888: format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM; break; case DRM_FORMAT_RGBA8888: format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM; break; case DRM_FORMAT_XBGR8888: format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM; break; case DRM_FORMAT_ABGR8888: format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM; break; #endif default: DRM_ERROR("failed to find virtio gpu format for %d\n", mode_cmd.pixel_format); return -EINVAL; } size = mode_cmd.pitches[0] * mode_cmd.height; obj = virtio_gpu_alloc_object(dev, size, false, true); if (IS_ERR(obj)) return PTR_ERR(obj); virtio_gpu_resource_id_get(vgdev, &resid); virtio_gpu_cmd_create_resource(vgdev, resid, format, mode_cmd.width, mode_cmd.height); ret = virtio_gpu_vmap_fb(vgdev, obj); if (ret) { DRM_ERROR("failed to vmap fb %d\n", ret); goto err_obj_vmap; } /* attach the object to the resource */ ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL); if (ret) goto err_obj_attach; info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto err_fb_alloc; } info->par = helper; ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb, &mode_cmd, &obj->gem_base); if (ret) goto err_fb_init; fb = &vfbdev->vgfb.base; vfbdev->helper.fb = fb; strcpy(info->fix.id, "virtiodrmfb"); info->flags = FBINFO_DEFAULT; info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->screen_base = obj->vmap; info->screen_size = obj->gem_base.size; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, sizes->fb_width, sizes->fb_height); info->fix.mmio_start = 0; info->fix.mmio_len = 0; return 0; err_fb_init: drm_fb_helper_release_fbi(helper); err_fb_alloc: virtio_gpu_cmd_resource_inval_backing(vgdev, resid); err_obj_attach: err_obj_vmap: virtio_gpu_gem_free_object(&obj->gem_base); return ret; }<sep>@@ struct drm_framebuffer 
*fb; @@ - fb->depth + fb->format->depth @@ struct drm_framebuffer *fb; @@ - (fb->format->depth) + fb->format->depth <|end_of_text|>
8,672
--- initial +++ final @@ -1,53 +1,53 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) { struct vmw_legacy_display *lds = dev_priv->ldu_priv; struct vmw_legacy_display_unit *entry; struct vmw_display_unit *du = NULL; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; int i = 0, ret; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { int w = 0, h = 0; list_for_each_entry(entry, &lds->active, active) { crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); i++; } if (crtc == NULL) return 0; fb = entry->base.crtc.primary->fb; - return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->bits_per_pixel, fb->depth); + return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->bits_per_pixel, fb->format->depth); } if (!list_empty(&lds->active)) { entry = list_entry(lds->active.next, typeof(*entry), active); fb = entry->base.crtc.primary->fb; - vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0], fb->bits_per_pixel, fb->depth); + vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0], fb->bits_per_pixel, fb->format->depth); } /* Make sure we always show something. */ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active ? lds->num_active : 1); i = 0; list_for_each_entry(entry, &lds->active, active) { crtc = &entry->base.crtc; vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x); vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y); vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay); vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay); vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); i++; } BUG_ON(i != lds->num_active); lds->last_num_active = lds->num_active; /* Find the first du with a cursor. */ list_for_each_entry(entry, &lds->active, active) { du = &entry->base; if (!du->cursor_dmabuf) continue; ret = vmw_cursor_update_dmabuf(dev_priv, du->cursor_dmabuf, 64, 64, du->hotspot_x, du->hotspot_y); if (ret == 0) break; DRM_ERROR("Could not update cursor image\n"); } return 0; }<sep>@@ struct drm_framebuffer *fb; @@ - fb->depth + fb->format->depth @@ struct drm_framebuffer *fb; @@ - (fb->format->depth) + fb->format->depth <|end_of_text|>
8,673
--- initial +++ final @@ -1,7 +1,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) - return addr_fold(skb->nfct); + return addr_fold(skb_nfct(skb)); #else return 0; #endif }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,674
--- initial +++ final @@ -1,60 +1,60 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, struct sk_buff *skb) { /* If we are recirculating packets to match on conntrack fields and * committing with a separate conntrack action, then we don't need to * actually run the packet through conntrack twice unless it's for a * different zone. */ bool cached = skb_nfct_cached(net, key, info, skb); enum ip_conntrack_info ctinfo; struct nf_conn *ct; if (!cached) { struct nf_conn *tmpl = info->ct; int err; /* Associate skb with specified zone. */ if (tmpl) { - if (skb->nfct) nf_conntrack_put(skb->nfct); + if (skb_nfct(skb)) nf_conntrack_put(skb_nfct(skb)); nf_conntrack_get(&tmpl->ct_general); skb->nfct = &tmpl->ct_general; skb->nfctinfo = IP_CT_NEW; } err = nf_conntrack_in(net, info->family, NF_INET_PRE_ROUTING, skb); if (err != NF_ACCEPT) return -ENOENT; /* Clear CT state NAT flags to mark that we have not yet done * NAT after the nf_conntrack_in() call. We can actually clear * the whole state, as it will be re-initialized below. */ key->ct.state = 0; /* Update the key, but keep the NAT flags. */ ovs_ct_update_key(skb, info, key, true, true); } ct = nf_ct_get(skb, &ctinfo); if (ct) { /* Packets starting a new connection must be NATted before the * helper, so that the helper knows about the NAT. We enforce * this by delaying both NAT and helper calls for unconfirmed * connections until the committing CT action. For later * packets NAT and Helper may be called in either order. * * NAT will be done only if the CT action has NAT, and only * once per packet (per zone), as guarded by the NAT bits in * the key->ct.state. */ if (info->nat && !(key->ct.state & OVS_CS_F_NAT_MASK) && (nf_ct_is_confirmed(ct) || info->commit) && ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) { return -EINVAL; } /* Userspace may decide to perform a ct lookup without a helper * specified followed by a (recirculate and) commit with one. * Therefore, for unconfirmed connections which we will commit, * we need to attach the helper here. */ if (!nf_ct_is_confirmed(ct) && info->commit && info->helper && !nfct_help(ct)) { int err = __nf_ct_try_assign_helper(ct, info->ct, GFP_ATOMIC); if (err) return err; } /* Call the helper only if: * - nf_conntrack_in() was executed above ("!cached") for a * confirmed connection, or * - When committing an unconfirmed connection. */ if ((nf_ct_is_confirmed(ct) ? !cached : info->commit) && ovs_ct_helper(skb, info->family) != NF_ACCEPT) { return -EINVAL; } } return 0; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,675
--- initial +++ final @@ -1,27 +1,27 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, struct sk_buff *skb) { struct nf_conntrack_expect *exp; /* If we pass an expected packet through nf_conntrack_in() the * expectation is typically removed, but the packet could still be * lost in upcall processing. To prevent this from happening we * perform an explicit expectation lookup. Expected connections are * always new, and will be passed through conntrack only when they are * committed, as it is OK to remove the expectation at that time. */ exp = ovs_ct_expect_find(net, &info->zone, info->family, skb); if (exp) { u8 state; /* NOTE: New connections are NATted and Helped only when * committed, so we are not calling into NAT here. */ state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; __ovs_ct_update_key(key, state, &info->zone, exp->master); } else { struct nf_conn *ct; int err; err = __ovs_ct_lookup(net, key, info, skb); if (err) return err; - ct = (struct nf_conn *)skb->nfct; + ct = (struct nf_conn *)skb_nfct(skb); if (ct) nf_ct_deliver_cached_events(ct); } return 0; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,676
--- initial +++ final @@ -1,25 +1,25 @@ static void synproxy_send_client_ack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; struct ipv6hdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; iph = ipv6_hdr(skb); tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, GFP_ATOMIC); if (nskb == NULL) return; skb_reserve(nskb, MAX_TCP_HEADER); niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); nth->source = th->source; nth->dest = th->dest; nth->seq = htonl(ntohl(th->seq) + 1); nth->ack_seq = th->ack_seq; tcp_flag_word(nth) = TCP_FLAG_ACK; nth->doff = tcp_hdr_size / 4; nth->window = htons(ntohs(th->window) >> opts->wscale); nth->check = 0; nth->urg_ptr = 0; synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,677
--- initial +++ final @@ -1,27 +1,27 @@ static void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; struct ipv6hdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; u16 mss = opts->mss; iph = ipv6_hdr(skb); tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, GFP_ATOMIC); if (nskb == NULL) return; skb_reserve(nskb, MAX_TCP_HEADER); niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); nth->source = th->dest; nth->dest = th->source; nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss)); nth->ack_seq = htonl(ntohl(th->seq) + 1); tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK; if (opts->options & XT_SYNPROXY_OPT_ECN) tcp_flag_word(nth) |= TCP_FLAG_ECE; nth->doff = tcp_hdr_size / 4; nth->window = 0; nth->check = 0; nth->urg_ptr = 0; synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,678
--- initial +++ final @@ -1,25 +1,25 @@ static void synproxy_send_client_ack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; struct iphdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; iph = ip_hdr(skb); tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, GFP_ATOMIC); if (nskb == NULL) return; skb_reserve(nskb, MAX_TCP_HEADER); niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); nth->source = th->source; nth->dest = th->dest; nth->seq = htonl(ntohl(th->seq) + 1); nth->ack_seq = th->ack_seq; tcp_flag_word(nth) = TCP_FLAG_ACK; nth->doff = tcp_hdr_size / 4; nth->window = htons(ntohs(th->window) >> opts->wscale); nth->check = 0; nth->urg_ptr = 0; synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,679
--- initial +++ final @@ -1,27 +1,27 @@ static void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; struct iphdr *iph, *niph; struct tcphdr *nth; unsigned int tcp_hdr_size; u16 mss = opts->mss; iph = ip_hdr(skb); tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts); nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER, GFP_ATOMIC); if (nskb == NULL) return; skb_reserve(nskb, MAX_TCP_HEADER); niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr); skb_reset_transport_header(nskb); nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); nth->source = th->dest; nth->dest = th->source; nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss)); nth->ack_seq = htonl(ntohl(th->seq) + 1); tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK; if (opts->options & XT_SYNPROXY_OPT_ECN) tcp_flag_word(nth) |= TCP_FLAG_ECE; nth->doff = tcp_hdr_size / 4; nth->window = 0; nth->check = 0; nth->urg_ptr = 0; synproxy_build_options(nth, opts); - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,680
--- initial +++ final @@ -1,34 +1,34 @@ static int icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int hooknum) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_l4proto *innerproto; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_zone *zone; enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; - NF_CT_ASSERT(skb->nfct == NULL); + NF_CT_ASSERT(skb_nfct(skb) == NULL); zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb) + ip_hdrlen(skb) + sizeof(struct icmphdr), PF_INET, net, &origtuple)) { pr_debug("icmp_error_message: failed to get tuple\n"); return -NF_ACCEPT; } /* rcu_read_lock()ed by nf_hook_thresh */ innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum); /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ if (!nf_ct_invert_tuple(&innertuple, &origtuple, &nf_conntrack_l3proto_ipv4, innerproto)) { pr_debug("icmp_error_message: no match\n"); return -NF_ACCEPT; } ctinfo = IP_CT_RELATED; h = nf_conntrack_find_get(net, zone, &innertuple); if (!h) { pr_debug("icmp_error_message: no match\n"); return -NF_ACCEPT; } if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) ctinfo += IP_CT_IS_REPLY; /* Update skb to refer to this connection */ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; skb->nfctinfo = ctinfo; return NF_ACCEPT; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,681
--- initial +++ final @@ -1,24 +1,24 @@ static int icmpv6_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u8 pf, unsigned int hooknum) { const struct icmp6hdr *icmp6h; struct icmp6hdr _ih; int type; icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); if (icmp6h == NULL) { if (LOG_INVALID(net, IPPROTO_ICMPV6)) nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL, "nf_ct_icmpv6: short packet "); return -NF_ACCEPT; } if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { if (LOG_INVALID(net, IPPROTO_ICMPV6)) nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL, "nf_ct_icmpv6: ICMPv6 checksum failed "); return -NF_ACCEPT; } type = icmp6h->icmp6_type - 130; if (type >= 0 && type < sizeof(noct_valid_new) && noct_valid_new[type]) { skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); return NF_ACCEPT; } /* is not error message ? */ if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,682
--- initial +++ final @@ -1,33 +1,33 @@ static int icmpv6_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int icmp6off, unsigned int hooknum) { struct nf_conntrack_tuple intuple, origtuple; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_l4proto *inproto; enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; - NF_CT_ASSERT(skb->nfct == NULL); + NF_CT_ASSERT(skb_nfct(skb) == NULL); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr), PF_INET6, net, &origtuple)) { pr_debug("icmpv6_error: Can't get tuple\n"); return -NF_ACCEPT; } /* rcu_read_lock()ed by nf_hook_thresh */ inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum); /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ if (!nf_ct_invert_tuple(&intuple, &origtuple, &nf_conntrack_l3proto_ipv6, inproto)) { pr_debug("icmpv6_error: Can't invert tuple\n"); return -NF_ACCEPT; } ctinfo = IP_CT_RELATED; h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp), &intuple); if (!h) { pr_debug("icmpv6_error: no match\n"); return -NF_ACCEPT; } else { if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) ctinfo += IP_CT_IS_REPLY; } /* Update skb to refer to this connection */ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; skb->nfctinfo = ctinfo; return NF_ACCEPT; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,683
--- initial +++ final @@ -1,17 +1,17 @@ static unsigned int ipv4_conntrack_defrag(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct sock *sk = skb->sk; if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) && inet_sk(sk)->nodefrag) return NF_ACCEPT; #if IS_ENABLED(CONFIG_NF_CONNTRACK) #if !IS_ENABLED(CONFIG_NF_NAT) /* Previously seen (loopback)? Ignore. Do this before fragment check. */ - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) return NF_ACCEPT; + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) return NF_ACCEPT; #endif #endif /* Gather fragments. */ if (ip_is_fragment(ip_hdr(skb))) { enum ip_defrag_users user = nf_ct_defrag_user(state->hook, skb); if (nf_ct_ipv4_gather_frags(state->net, skb, user)) return NF_STOLEN; } return NF_ACCEPT; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,684
--- initial +++ final @@ -1,15 +1,15 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, struct sk_buff *skb) { u16 zone_id = NF_CT_DEFAULT_ZONE_ID; #if IS_ENABLED(CONFIG_NF_CONNTRACK) - if (skb->nfct) { + if (skb_nfct(skb)) { enum ip_conntrack_info ctinfo; const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); } #endif if (nf_bridge_in_prerouting(skb)) return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id; if (hooknum == NF_INET_PRE_ROUTING) return IP_DEFRAG_CONNTRACK_IN + zone_id; else return IP_DEFRAG_CONNTRACK_OUT + zone_id; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,685
--- initial +++ final @@ -1,11 +1,11 @@ static unsigned int ipv6_defrag(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { int err; #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Previously seen (loopback)? */ - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) return NF_ACCEPT; + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) return NF_ACCEPT; #endif err = nf_ct_frag6_gather(state->net, skb, nf_ct6_defrag_user(state->hook, skb)); /* queued */ if (err == -EINPROGRESS) return NF_STOLEN; return err == 0 ? NF_ACCEPT : NF_DROP; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,686
--- initial +++ final @@ -1,15 +1,15 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, struct sk_buff *skb) { u16 zone_id = NF_CT_DEFAULT_ZONE_ID; #if IS_ENABLED(CONFIG_NF_CONNTRACK) - if (skb->nfct) { + if (skb_nfct(skb)) { enum ip_conntrack_info ctinfo; const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); } #endif if (nf_bridge_in_prerouting(skb)) return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id; if (hooknum == NF_INET_PRE_ROUTING) return IP6_DEFRAG_CONNTRACK_IN + zone_id; else return IP6_DEFRAG_CONNTRACK_OUT + zone_id; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,687
--- initial +++ final @@ -1,37 +1,37 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in_addr *gw, int oif) { struct iphdr *iph; if (this_cpu_read(nf_skb_duplicated)) return; /* * Copy the skb, and route the copy. Will later return %XT_CONTINUE for * the original skb, which should continue on its way as if nothing has * happened. The copy should be independently delivered to the gateway. */ skb = pskb_copy(skb, GFP_ATOMIC); if (skb == NULL) return; #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Avoid counting cloned packets towards the original connection. */ nf_reset(skb); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); #endif /* * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential * loops between two hosts. * * Set %IP_DF so that the original source is notified of a potentially * decreased MTU on the clone route. IPv6 does this too. * * IP header checksum will be recalculated at ip_local_out. */ iph = ip_hdr(skb); iph->frag_off |= htons(IP_DF); if (hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_IN) --iph->ttl; if (nf_dup_ipv4_route(net, skb, gw, oif)) { __this_cpu_write(nf_skb_duplicated, true); ip_local_out(net, skb->sk, skb); __this_cpu_write(nf_skb_duplicated, false); } else { kfree_skb(skb); } }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,688
--- initial +++ final @@ -1,27 +1,27 @@ static void mangle_contents(struct sk_buff *skb, unsigned int dataoff, unsigned int match_offset, unsigned int match_len, const char *rep_buffer, unsigned int rep_len) { unsigned char *data; BUG_ON(skb_is_nonlinear(skb)); data = skb_network_header(skb) + dataoff; /* move post-replacement */ memmove(data + match_offset + rep_len, data + match_offset + match_len, skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff + match_offset + match_len)); /* insert data from buffer */ memcpy(data + match_offset, rep_buffer, rep_len); /* update skb info */ if (rep_len > match_len) { pr_debug("nf_nat_mangle_packet: Extending packet by " "%u from %u bytes\n", rep_len - match_len, skb->len); skb_put(skb, rep_len - match_len); } else { pr_debug("nf_nat_mangle_packet: Shrinking packet from " "%u from %u bytes\n", match_len - rep_len, skb->len); __skb_trim(skb, skb->len + rep_len - match_len); } - if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) { + if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) { /* fix IP hdr checksum information */ ip_hdr(skb)->tot_len = htons(skb->len); ip_send_check(ip_hdr(skb)); } else ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,689
--- initial +++ final @@ -1,16 +1,16 @@ static void skb_release_head_state(struct sk_buff *skb) { skb_dst_drop(skb); #ifdef CONFIG_XFRM secpath_put(skb->sp); #endif if (skb->destructor) { WARN_ON(in_irq()); skb->destructor(skb); } #if IS_ENABLED(CONFIG_NF_CONNTRACK) - nf_conntrack_put(skb->nfct); + nf_conntrack_put(skb_nfct(skb)); #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); #endif }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,690
--- initial +++ final @@ -1,9 +1,9 @@ void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb) { void (*attach)(struct sk_buff *, const struct sk_buff *); - if (skb->nfct) { + if (skb_nfct(skb)) { rcu_read_lock(); attach = rcu_dereference(ip_ct_attach); if (attach) attach(new, skb); rcu_read_unlock(); } }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,691
--- initial +++ final @@ -1,83 +1,83 @@ unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { struct nf_conn *ct, *tmpl; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; unsigned int *timeouts; unsigned int dataoff; u_int8_t protonum; int set_reply = 0; int ret; tmpl = nf_ct_get(skb, &ctinfo); if (tmpl) { /* Previously seen (loopback or untracked)? Ignore. */ if (!nf_ct_is_template(tmpl)) { NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; } skb->nfct = NULL; } /* rcu_read_lock()ed by nf_hook_thresh */ l3proto = __nf_ct_l3proto_find(pf); ret = l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, &protonum); if (ret <= 0) { pr_debug("not prepared to track yet or error occurred\n"); NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } l4proto = __nf_ct_l4proto_find(pf, protonum); /* It may be an special packet, error, unclean... * inverse of the return code tells to the netfilter * core what to do with the packet. */ if (l4proto->error != NULL) { ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum); if (ret <= 0) { NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } /* ICMP[v6] protocol trackers may assign one conntrack. */ - if (skb->nfct) goto out; + if (skb_nfct(skb)) goto out; } repeat: ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); if (!ct) { /* Not valid part of a connection */ NF_CT_STAT_INC_ATOMIC(net, invalid); ret = NF_ACCEPT; goto out; } if (IS_ERR(ct)) { /* Too stressed to deal. */ NF_CT_STAT_INC_ATOMIC(net, drop); ret = NF_DROP; goto out; } NF_CT_ASSERT(skb_nfct(skb)); /* Decide what timeout policy we want to apply to this flow. */ timeouts = nf_ct_timeout_lookup(net, ct, l4proto); ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); skb->nfct = NULL; NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(net, drop); /* Special case: TCP tracker reports an attempt to reopen a * closed/aborted connection. We have to go back and create a * fresh conntrack. */ if (ret == -NF_REPEAT) goto repeat; ret = -ret; goto out; } if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: if (tmpl) nf_ct_put(tmpl); return ret; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,692
--- initial +++ final @@ -1,21 +1,21 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in6_addr *gw, int oif) { if (this_cpu_read(nf_skb_duplicated)) return; skb = pskb_copy(skb, GFP_ATOMIC); if (skb == NULL) return; #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_reset(skb); nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb->nfct); + nf_conntrack_get(skb_nfct(skb)); #endif if (hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_IN) { struct ipv6hdr *iph = ipv6_hdr(skb); --iph->hop_limit; } if (nf_dup_ipv6_route(net, skb, gw, oif)) { __this_cpu_write(nf_skb_duplicated, true); ip6_local_out(net, skb->sk, skb); __this_cpu_write(nf_skb_duplicated, false); } else { kfree_skb(skb); } }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,693
--- initial +++ final @@ -1,7 +1,7 @@ static unsigned int notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) { /* Previously seen (loopback)? Ignore. */ - if (skb->nfct != NULL) return XT_CONTINUE; + if (skb_nfct(skb) != NULL) return XT_CONTINUE; nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); nf_conntrack_get(skb_nfct(skb)); return XT_CONTINUE; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,694
--- initial +++ final @@ -1,9 +1,9 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) { /* Previously seen (loopback)? Ignore. */ - if (skb->nfct != NULL) return XT_CONTINUE; + if (skb_nfct(skb) != NULL) return XT_CONTINUE; /* special case the untracked ct : we want the percpu object */ if (!ct) ct = nf_ct_untracked_get(); atomic_inc(&ct->ct_general.use); nf_ct_set(skb, ct, IP_CT_NEW); return XT_CONTINUE; }<sep>@@ struct sk_buff *skb; expression e; statement S1, S2; @@ ( skb->nfct = e | - skb->nfct + skb_nfct(skb) ) <|end_of_text|>
8,695
--- initial +++ final @@ -1,45 +1,45 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, struct ieee80211_sta_ht_cap *ht_cap) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); ht_cap->ht_supported = true; ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU; if (rtlpriv->rtlhal.disable_amsdu_8k) ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU; /* *Maximum length of AMPDU that the STA can receive. *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; /*Minimum MPDU start spacing , */ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; /*hw->wiphy->bands[NL80211_BAND_2GHZ] *base on ant_num *rx_mask: RX mask *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7 *if rx_ant = 2 rx_mask[1]= 0xff;==>MCS8-MCS15 *if rx_ant >= 3 rx_mask[2]= 0xff; *if BW_40 rx_mask[4]= 0x01; *highest supported RX rate */ if (rtlpriv->dm.supp_phymode_switch) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Support phy mode switch\n"); + pr_err("Support phy mode switch\n"); ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15); } else { if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_2T2R) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T2R or 2T2R\n"); ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15); } else if (get_rf_type(rtlphy) == RF_1T1R) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n"); ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0x00; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7); } } }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,821
--- initial +++ final @@ -1,99 +1,99 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct ieee80211_supported_band *sband; if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && rtlhal->bandset == BAND_ON_BOTH) { /* 1: 2.4 G bands */ /* <1> use mac->bands as mem for hw->wiphy->bands */ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]); /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; /* 2: 5 G bands */ /* <1> use mac->bands as mem for hw->wiphy->bands */ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]); /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { if (rtlhal->current_bandtype == BAND_ON_2_4G) { /* <1> use mac->bands as mem for hw->wiphy->bands */ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]); /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } else if (rtlhal->current_bandtype == BAND_ON_5G) { /* <1> use mac->bands as mem for hw->wiphy->bands */ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]); /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz, sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n", rtlhal->current_bandtype); + pr_err("Err BAND %d\n", rtlhal->current_bandtype); } } /* <5> set hw caps */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); /* swlps or hwlps has been set in diff chip in init_sw_vars */ if (rtlpriv->psc.swctrl_lps) { ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, PS_NULLFUNC_STACK); } hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; hw->wiphy->rts_threshold = 2347; hw->queues = AC_MAX; hw->extra_tx_headroom = RTL_TX_HEADER_SIZE; /* TODO: Correct this value for our hw */ /* TODO: define these hard code value */ hw->max_listen_interval = 10; hw->max_rate_tries = 4; /* hw->max_rates = 
1; */ hw->sta_data_size = sizeof(struct rtl_sta_info); /* wowlan is not supported by kernel if CONFIG_PM is not defined */ #ifdef CONFIG_PM if (rtlpriv->psc.wo_wlan_mode) { if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_MAGIC_PACKET) rtlpriv->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_PATTERN_MATCH) { rtlpriv->wowlan.n_patterns = MAX_SUPPORT_WOL_PATTERN_NUM; rtlpriv->wowlan.pattern_min_len = MIN_WOL_PATTERN_SIZE; rtlpriv->wowlan.pattern_max_len = MAX_WOL_PATTERN_SIZE; } hw->wiphy->wowlan = &rtlpriv->wowlan; } #endif /* <6> mac address */ if (is_valid_ether_addr(rtlefuse->dev_addr)) { SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); } else { u8 rtlmac1[] = {0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00}; get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); SET_IEEE80211_PERM_ADDR(hw, rtlmac1); } }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,822
--- initial +++ final @@ -1,86 +1,86 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw, u8 loadfw_status) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rt_firmware *firmware = (struct rt_firmware *)rtlhal->pfirmware; u32 tmpu4b; u8 cpustatus = 0; short pollingcnt = 1000; bool rtstatus = true; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "LoadStaus(%d)\n", loadfw_status); firmware->fwstatus = (enum fw_status)loadfw_status; switch (loadfw_status) { case FW_STATUS_LOAD_IMEM: /* Polling IMEM code done. */ do { cpustatus = rtl_read_byte(rtlpriv, TCR); if (cpustatus & IMEM_CODE_DONE) break; udelay(5); } while (pollingcnt--); if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n", cpustatus); goto status_check_fail; } break; case FW_STATUS_LOAD_EMEM: /* Check Put Code OK and Turn On CPU */ /* Polling EMEM code done. */ do { cpustatus = rtl_read_byte(rtlpriv, TCR); if (cpustatus & EMEM_CODE_DONE) break; udelay(5); } while (pollingcnt--); if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n", cpustatus); goto status_check_fail; } /* Turn On CPU */ rtstatus = _rtl92s_firmware_enable_cpu(hw); if (!rtstatus) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Enable CPU fail!\n"); goto status_check_fail; } break; case FW_STATUS_LOAD_DMEM: /* Polling DMEM code done */ do { cpustatus = rtl_read_byte(rtlpriv, TCR); if (cpustatus & DMEM_CODE_DONE) break; udelay(5); } while (pollingcnt--); if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Polling DMEM code done fail ! cpustatus(%#x)\n", cpustatus); goto status_check_fail; } RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "DMEM code download success, cpustatus(%#x)\n", cpustatus); /* Prevent Delay too much and being scheduled out */ /* Polling Load Firmware ready */ pollingcnt = 2000; do { cpustatus = rtl_read_byte(rtlpriv, TCR); if (cpustatus & FWRDY) break; udelay(40); } while (pollingcnt--); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Polling Load Firmware ready, cpustatus(%x)\n", cpustatus); if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) || (pollingcnt <= 0)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Polling Load Firmware ready fail ! cpustatus(%x)\n", cpustatus); goto status_check_fail; } /* If right here, we can set TCR/RCR to desired value */ /* and config MAC lookback mode to normal mode */ tmpu4b = rtl_read_dword(rtlpriv, TCR); rtl_write_dword(rtlpriv, TCR, (tmpu4b & (~TCR_ICV))); tmpu4b = rtl_read_dword(rtlpriv, RCR); rtl_write_dword(rtlpriv, RCR, (tmpu4b | RCR_APPFCS | RCR_APP_ICV | RCR_APP_MIC)); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Current RCR settings(%#x)\n", tmpu4b); /* Set to normal mode. */ rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL); break; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown status check!\n"); + pr_err("Unknown status check!\n"); rtstatus = false; break; } status_check_fail: RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "loadfw_status(%d), rtstatus(%x)\n", loadfw_status, rtstatus); return rtstatus; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,823
--- initial +++ final @@ -1,11 +1,11 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); switch (rtlphy->rf_type) { case RF_1T1R: return 0x11; case RF_1T2R: return 0x12; case RF_2T2R: return 0x22; - default: RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n", rtlphy->rf_type); break; + default: pr_err("Unknown RF type(%x)\n", rtlphy->rf_type); break; } return 0x22; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,824
--- initial +++ final @@ -1,42 +1,42 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus = true; /* 1. Read PHY_REG.TXT BB INIT!! */ /* We will separate as 1T1R/1T2R/1T2R_GREEN/2T2R */ if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_2T2R || rtlphy->rf_type == RF_1T1R || rtlphy->rf_type == RF_2T2R_GREEN) { rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_PHY_REG); if (rtlphy->rf_type != RF_2T2R && rtlphy->rf_type != RF_2T2R_GREEN) /* so we should reconfig BB reg with the right * PHY parameters. */ rtstatus = _rtl92s_phy_set_bb_to_diff_rf(hw, BASEBAND_CONFIG_PHY_REG); } else { rtstatus = false; } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Write BB Reg Fail!!\n"); + pr_err("Write BB Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } /* 2. If EEPROM or EFUSE autoload OK, We must config by * PHY_REG_PG.txt */ if (rtlefuse->autoload_failflag == false) { rtlphy->pwrgroup_cnt = 0; rtstatus = _rtl92s_phy_config_bb_with_pg(hw, BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); + pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); goto phy_BB8190_Config_ParaFile_Fail; } /* 3. BB AGC table Initialization */ rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB); if (!rtstatus) { pr_err("%s(): AGC Table Fail\n", __func__); goto phy_BB8190_Config_ParaFile_Fail; } /* Check if the CCK HighPower is turned ON. */ /* This is used to calculate PWDB. */ rtlphy->cck_high_power = (bool)(rtl92s_phy_query_bb_reg(hw, RFPGA0_XA_HSSIPARAMETER2, 0x200)); phy_BB8190_Config_ParaFile_Fail: return rtstatus; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,826
--- initial +++ final @@ -1,25 +1,25 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); bool rtstatus = true; u8 pathmap, index, rf_num = 0; u8 path1, path2; _rtl92s_phy_init_register_definition(hw); /* Config BB and AGC */ rtstatus = _rtl92s_phy_bb_config_parafile(hw); /* Check BB/RF confiuration setting. */ /* We only need to configure RF which is turned on. */ path1 = (u8)(rtl92s_phy_query_bb_reg(hw, RFPGA0_TXINFO, 0xf)); mdelay(10); path2 = (u8)(rtl92s_phy_query_bb_reg(hw, ROFDM0_TRXPATHENABLE, 0xf)); pathmap = path1 | path2; rtlphy->rf_pathmap = pathmap; for (index = 0; index < 4; index++) { if ((pathmap >> index) & 0x1) rf_num++; } if ((rtlphy->rf_type == RF_1T1R && rf_num != 1) || (rtlphy->rf_type == RF_1T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R && rf_num != 2) || (rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "RF_Type(%x) does not match RF_Num(%x)!!\n", rtlphy->rf_type, rf_num); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "path1 0x%x, path2 0x%x, pathmap 0x%x\n", path1, path2, pathmap); + pr_err("RF_Type(%x) does not match RF_Num(%x)!!\n", rtlphy->rf_type, rf_num); + pr_err("path1 0x%x, path2 0x%x, pathmap 0x%x\n", path1, path2, pathmap); } return rtstatus; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,827
--- initial +++ final @@ -1,28 +1,28 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); enum version_8192d chipver = rtlpriv->rtlhal.version; u8 cutvalue[2]; u16 chipvalue; rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]); rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]); chipvalue = (cutvalue[1] << 8) | cutvalue[0]; switch (chipvalue) { case 0xAA55: chipver |= CHIP_92D_C_CUT; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n"); break; case 0x9966: chipver |= CHIP_92D_D_CUT; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); break; case 0xCC33: chipver |= CHIP_92D_E_CUT; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n"); break; default: chipver |= CHIP_92D_D_CUT; - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n"); + pr_err("Unknown CUT!\n"); break; } rtlpriv->rtlhal.version = chipver; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,828
--- initial +++ final @@ -1,21 +1,21 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 queue_sel) { u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; struct rtl_priv *rtlpriv = rtl_priv(hw); if (!wmm_enable) { /* typical setting */ beQ = QUEUE_LOW; bkQ = QUEUE_LOW; viQ = QUEUE_NORMAL; voQ = QUEUE_HIGH; mgtQ = QUEUE_HIGH; hiQ = QUEUE_HIGH; } else { /* for WMM */ beQ = QUEUE_LOW; bkQ = QUEUE_NORMAL; viQ = QUEUE_NORMAL; voQ = QUEUE_HIGH; mgtQ = QUEUE_HIGH; hiQ = QUEUE_HIGH; } _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n", queue_sel); + pr_err("Tx queue select :0x%02x..\n", queue_sel); }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,829
--- initial +++ final @@ -1,26 +1,26 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 out_ep_num, u8 queue_sel) { u8 hq_sele = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); switch (out_ep_num) { case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */ if (!wmm_enable) /* typical setting */ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | HQSEL_HIQ; else /* for WMM */ hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ | HQSEL_HIQ; break; case 1: if (TX_SELE_LQ == queue_sel) { /* map all endpoint to Low queue */ hq_sele = 0; } else if (TX_SELE_HQ == queue_sel) { /* map all endpoint to High queue */ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ | HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ; } break; default: WARN_ON(1); /* Shall not reach here! */ break; } rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL + 1), hq_sele); - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n", hq_sele); + pr_err("Tx queue select :0x%02x..\n", hq_sele); }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,830
--- initial +++ final @@ -1,66 +1,66 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); int status = 0; u16 value16; u8 value8; /* polling autoload done. */ u32 pollingCount = 0; do { if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Autoload Done!\n"); break; } if (pollingCount++ > 100) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); return -ENODEV; } } while (true); /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); /* Power on when re-enter from IPS/Radio off/card disable */ /* enable SPS into PWM mode */ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); udelay(100); value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL); if (0 == (value8 & LDV12_EN)) { value8 |= LDV12_EN; rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n", value8); udelay(100); value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL); value8 &= ~ISO_MD2PP; rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8); } /* auto enable WLAN */ pollingCount = 0; value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO); value16 |= APFM_ONMAC; rtl_write_word(rtlpriv, REG_APS_FSMCO, value16); do { if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) { pr_info("MAC auto ON okay!\n"); break; } if (pollingCount++ > 1000) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); + pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); return -ENODEV; } } while (true); /* Enable Radio ,GPIO ,and LED function */ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812); /* release RF digital isolation */ value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL); value16 &= ~ISO_DIOR; rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16); /* Reconsider when to do this operation after asking HWSD. */ pollingCount = 0; rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & ~BIT(6))); do { pollingCount++; } while ((pollingCount < 200) && (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7))); /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ value16 = rtl_read_word(rtlpriv, REG_CR); value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN | PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC); rtl_write_word(rtlpriv, REG_CR, value16); return status; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,831
--- initial +++ final @@ -1,43 +1,43 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); enum version_8192c version = VERSION_UNKNOWN; u32 value32; const char *versionid; value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); if (value32 & TRP_VAUX_EN) { version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C : VERSION_A_CHIP_88C; } else { version = (enum version_8192c)(CHIP_VER_B | ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) | ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 & CHIP_VER_RTL_MASK)) { version = (enum version_8192c)(version | ((((value32 & CHIP_VER_RTL_MASK) == BIT(12)) ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | CHIP_VENDOR_UMC)); } if (IS_92C_SERIAL(version)) { value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM); version = (enum version_8192c)(version | ((CHIP_BONDING_IDENTIFIER(value32) == CHIP_BONDING_92C_1T2R) ? RF_TYPE_1T2R : 0)); } } switch (version) { case VERSION_B_CHIP_92C: versionid = "B_CHIP_92C"; break; case VERSION_B_CHIP_88C: versionid = "B_CHIP_88C"; break; case VERSION_A_CHIP_92C: versionid = "A_CHIP_92C"; break; case VERSION_A_CHIP_88C: versionid = "A_CHIP_88C"; break; case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT: versionid = "A_CUT_92C_1T2R"; break; case VERSION_NORMAL_UMC_CHIP_92C_A_CUT: versionid = "A_CUT_92C"; break; case VERSION_NORMAL_UMC_CHIP_88C_A_CUT: versionid = "A_CUT_88C"; break; case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT: versionid = "B_CUT_92C_1T2R"; break; case VERSION_NORMAL_UMC_CHIP_92C_B_CUT: versionid = "B_CUT_92C"; break; case VERSION_NORMAL_UMC_CHIP_88C_B_CUT: versionid = "B_CUT_88C"; break; default: versionid = "Unknown. Bug?"; break; } - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Chip Version ID: %s\n", versionid); + pr_err("Chip Version ID: %s\n", versionid); switch (version & 0x3) { case CHIP_88C: rtlphy->rf_type = RF_1T1R; break; case CHIP_92C: rtlphy->rf_type = RF_2T2R; break; case CHIP_92C_1T2R: rtlphy->rf_type = RF_1T2R; break; default: rtlphy->rf_type = RF_1T1R; RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ERROR RF_Type is set!!\n"); break; } RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n", rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R"); return version; }<sep>@@ expression e1; expression list e2; @@ - RT_TRACE(e1, COMP_INIT, DBG_EMERG, e2) + pr_err(e2) <|end_of_text|>
8,832
--- initial +++ final @@ -1,38 +1,38 @@ void smp_callin(void) { int cpuid = hard_smp_processor_id(); if (cpu_online(cpuid)) { printk("??, cpu 0x%x already present??\n", cpuid); BUG(); } set_cpu_online(cpuid, true); /* Turn on machine checks. */ wrmces(7); /* Set trap vectors. */ trap_init(); /* Set interrupt vector. */ wrent(entInt, 0); /* Get our local ticker going. */ smp_setup_percpu_timer(cpuid); init_clockevent(); /* Call platform-specific callin, if specified */ if (alpha_mv.smp_callin) alpha_mv.smp_callin(); /* All kernel threads share the same mm context. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ notify_cpu_starting(cpuid); /* Must have completely accurate bogos. */ local_irq_enable(); /* Wait boot CPU to stop with irq enabled before running calibrate_delay. */ wait_boot_cpu_to_stop(cpuid); mb(); calibrate_delay(); smp_store_cpu_info(cpuid); /* Allow master to continue only after we written loops_per_jiffy. */ wmb(); smp_secondary_alive = 1; DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n", cpuid, current, current->active_mm)); preempt_disable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8,833
--- initial +++ final @@ -1,19 +1,19 @@ void start_kernel_secondary(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); /* MMU, Caches, Vector Table, Interrupts etc */ setup_processor(); atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); /* Some SMP H/w setup - for each cpu */ if (plat_smp_ops.init_per_cpu) plat_smp_ops.init_per_cpu(cpu); if (machine_desc->init_per_cpu) machine_desc->init_per_cpu(cpu); notify_cpu_starting(cpu); set_cpu_online(cpu, true); pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); local_irq_enable(); preempt_disable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8,834
--- initial +++ final @@ -1,50 +1,50 @@ asmlinkage void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu; cpu = task_cpu(current); set_my_cpu_offset(per_cpu_offset(cpu)); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. */ cpu_uninstall_idmap(); preempt_disable(); trace_hardirqs_off(); /* * If the system has established the capabilities, make sure * this CPU ticks all of those. If it doesn't, the CPU will * fail to come online. */ check_local_cpu_capabilities(); if (cpu_ops[cpu]->cpu_postboot) cpu_ops[cpu]->cpu_postboot(); /* * Log the CPU info before it is marked online and might get read. */ cpuinfo_store_cpu(); /* * Enable GIC and timers. */ notify_cpu_starting(cpu); store_cpu_topology(cpu); /* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue. */ pr_info("CPU%u: Booted secondary processor [%08x]\n", cpu, read_cpuid_id()); update_cpu_boot_status(CPU_BOOT_SUCCESS); set_cpu_online(cpu, true); complete(&cpu_running); local_irq_enable(); local_async_enable(); /* * OK, it's off to the idle thread for us */ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8835
--- initial +++ final @@ -1,45 +1,45 @@ asmlinkage void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu; /* * The identity mapping is uncached (strongly ordered), so * switch away from it before attempting any exclusive accesses. */ cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); enter_lazy_tlb(mm, current); local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ cpu = smp_processor_id(); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_init(); pr_debug("CPU%u: Booted secondary processor\n", cpu); preempt_disable(); trace_hardirqs_off(); /* * Give the platform a chance to do its own initialisation. */ if (smp_ops.smp_secondary_init) smp_ops.smp_secondary_init(cpu); notify_cpu_starting(cpu); calibrate_delay(); smp_store_cpu_info(cpu); /* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue - which happens after __cpu_up returns. */ set_cpu_online(cpu, true); complete(&cpu_running); local_irq_enable(); local_fiq_enable(); local_abt_enable(); /* * OK, it's off to the idle thread for us */ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8836
--- initial +++ final @@ -1,65 +1,65 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) { static DEFINE_MUTEX(oom_adj_mutex); struct mm_struct *mm = NULL; struct task_struct *task; int err = 0; task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; mutex_lock(&oom_adj_mutex); if (legacy) { if (oom_adj < task->signal->oom_score_adj && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_unlock; } /* * /proc/pid/oom_adj is provided for legacy purposes, ask users to use * /proc/pid/oom_score_adj instead. */ pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", current->comm, task_pid_nr(current), task_pid_nr(task), task_pid_nr(task)); } else { if ((short)oom_adj < task->signal->oom_score_adj_min && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_unlock; } } /* * Make sure we will check other processes sharing the mm if this is * not vfrok which wants its own oom_score_adj. * pin the mm so it doesn't go away and get reused after task_unlock */ if (!task->vfork_done) { struct task_struct *p = find_lock_task_mm(task); if (p) { if (atomic_read(&p->mm->mm_users) > 1) { mm = p->mm; - atomic_inc(&mm->mm_count); + mmgrab(mm); } task_unlock(p); } } task->signal->oom_score_adj = oom_adj; if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) task->signal->oom_score_adj_min = (short)oom_adj; trace_oom_score_adj_update(task); if (mm) { struct task_struct *p; rcu_read_lock(); for_each_process(p) { if (same_thread_group(task, p)) continue; /* do not touch kernel threads or the global init */ if (p->flags & PF_KTHREAD || is_global_init(p)) continue; task_lock(p); if (!p->vfork_done && process_shares_mm(p, mm)) { pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n", task_pid_nr(p), p->comm, p->signal->oom_score_adj, oom_adj, task_pid_nr(task), task->comm); p->signal->oom_score_adj = oom_adj; if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) p->signal->oom_score_adj_min = (short)oom_adj; } task_unlock(p); } rcu_read_unlock(); mmdrop(mm); } err_unlock: mutex_unlock(&oom_adj_mutex); put_task_struct(task); return err; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8837
--- initial +++ final @@ -1,15 +1,15 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode) { struct task_struct *task = get_proc_task(inode); struct mm_struct *mm = ERR_PTR(-ESRCH); if (task) { mm = mm_access(task, mode | PTRACE_MODE_FSCREDS); put_task_struct(task); if (!IS_ERR_OR_NULL(mm)) { /* ensure this mm_struct can't be freed */ - atomic_inc(&mm->mm_count); + mmgrab(mm); /* but do not pin its memory */ mmput(mm); } } return mm; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8838
--- initial +++ final @@ -1,141 +1,141 @@ void __init sched_init(void) { int i, j; unsigned long alloc_size = 0, ptr; sched_clock_init(); for (i = 0; i < WAIT_TABLE_SIZE; i++) init_waitqueue_head(bit_wait_table + i); #ifdef CONFIG_FAIR_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif #ifdef CONFIG_RT_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif if (alloc_size) { ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); #ifdef CONFIG_FAIR_GROUP_SCHED root_task_group.se = (struct sched_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); root_task_group.cfs_rq = (struct cfs_rq **)ptr; ptr += nr_cpu_ids * sizeof(void **); #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED root_task_group.rt_se = (struct sched_rt_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); root_task_group.rt_rq = (struct rt_rq **)ptr; ptr += nr_cpu_ids * sizeof(void **); #endif /* CONFIG_RT_GROUP_SCHED */ } #ifdef CONFIG_CPUMASK_OFFSTACK for_each_possible_cpu(i) { per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(cpumask_size(), GFP_KERNEL, cpu_to_node(i)); per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(cpumask_size(), GFP_KERNEL, cpu_to_node(i)); } #endif /* CONFIG_CPUMASK_OFFSTACK */ init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); #ifdef CONFIG_SMP init_defrootdomain(); #endif #ifdef CONFIG_RT_GROUP_SCHED init_rt_bandwidth(&root_task_group.rt_bandwidth, global_rt_period(), global_rt_runtime()); #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_CGROUP_SCHED task_group_cache = KMEM_CACHE(task_group, 0); list_add(&root_task_group.list, &task_groups); INIT_LIST_HEAD(&root_task_group.children); INIT_LIST_HEAD(&root_task_group.siblings); autogroup_init(&init_task); #endif /* CONFIG_CGROUP_SCHED */ for_each_possible_cpu(i) { struct rq *rq; rq = cpu_rq(i); raw_spin_lock_init(&rq->lock); rq->nr_running = 0; rq->calc_load_active = 0; rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt); init_dl_rq(&rq->dl); #ifdef CONFIG_FAIR_GROUP_SCHED root_task_group.shares = ROOT_TASK_GROUP_LOAD; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; /* * How much CPU bandwidth does root_task_group get? * * In case of task-groups formed thr' the cgroup filesystem, it * gets 100% of the CPU resources in the system. This overall * system CPU resource is divided among the tasks of * root_task_group and its child task-groups in a fair manner, * based on each entity's (task or task-group's) weight * (se->load.weight). * * In other words, if root_task_group has 10 tasks of weight * 1024) and two child groups A0 and A1 (of weight 1024 each), * then A0's share of the CPU resource is: * * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% * * We achieve this by letting root_task_group's tasks sit * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
*/ init_cfs_bandwidth(&root_task_group.cfs_bandwidth); init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); #endif /* CONFIG_FAIR_GROUP_SCHED */ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; #ifdef CONFIG_RT_GROUP_SCHED init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); #endif for (j = 0; j < CPU_LOAD_IDX_MAX; j++) rq->cpu_load[j] = 0; #ifdef CONFIG_SMP rq->sd = NULL; rq->rd = NULL; rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; rq->balance_callback = NULL; rq->active_balance = 0; rq->next_balance = jiffies; rq->push_cpu = 0; rq->cpu = i; rq->online = 0; rq->idle_stamp = 0; rq->avg_idle = 2 * sysctl_sched_migration_cost; rq->max_idle_balance_cost = sysctl_sched_migration_cost; INIT_LIST_HEAD(&rq->cfs_tasks); rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ_COMMON rq->last_load_update_tick = jiffies; rq->nohz_flags = 0; #endif #ifdef CONFIG_NO_HZ_FULL rq->last_sched_tick = 0; #endif #endif /* CONFIG_SMP */ init_rq_hrtick(rq); atomic_set(&rq->nr_iowait, 0); } set_load_weight(&init_task); /* * The boot idle thread does lazy MMU switching as well: */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); enter_lazy_tlb(&init_mm, current); /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, * but because we are the idle thread, we just pick up running again * when this runqueue becomes "idle". */ init_idle(current, smp_processor_id()); calc_load_update = jiffies + LOAD_FREQ; #ifdef CONFIG_SMP zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); /* May be allocated at isolcpus cmdline parse time */ if (cpu_isolated_map == NULL) zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); idle_thread_set_boot_cpu(); set_cpu_rq_start_time(smp_processor_id()); #endif init_sched_fair_class(); init_schedstats(); scheduler_running = 1; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8839
--- initial +++ final @@ -1,47 +1,47 @@ static void exit_mm(void) { struct mm_struct *mm = current->mm; struct core_state *core_state; mm_release(current, mm); if (!mm) return; sync_mm_rss(mm); /* * Serialize with any possible pending coredump. * We must hold mmap_sem around checking core_state * and clearing tsk->mm. The core-inducing thread * will increment ->nr_threads for each thread in the * group with ->mm != NULL. */ down_read(&mm->mmap_sem); core_state = mm->core_state; if (core_state) { struct core_thread self; up_read(&mm->mmap_sem); self.task = current; self.next = xchg(&core_state->dumper.next, &self); /* * Implies mb(), the result of xchg() must be visible * to core_state->dumper. */ if (atomic_dec_and_test(&core_state->nr_threads)) complete(&core_state->startup); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!self.task) /* see coredump_finish() */ break; freezable_schedule(); } __set_current_state(TASK_RUNNING); down_read(&mm->mmap_sem); } - atomic_inc(&mm->mm_count); + mmgrab(mm); BUG_ON(mm != current->active_mm); /* more a memory barrier than a real lock */ task_lock(current); current->mm = NULL; up_read(&mm->mmap_sem); enter_lazy_tlb(mm, current); task_unlock(current); mm_update_next_owner(mm); userfaultfd_exit(mm); mmput(mm); if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8840
--- initial +++ final @@ -1,20 +1,20 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) { struct hfi1_filedata *fd; struct hfi1_devdata *dd = container_of(inode->i_cdev, struct hfi1_devdata, user_cdev); if (!atomic_inc_not_zero(&dd->user_refcount)) return -ENXIO; /* Just take a ref now. Not all opens result in a context assign */ kobject_get(&dd->kobj); /* The real work is performed later in assign_ctxt() */ fd = kzalloc(sizeof(*fd), GFP_KERNEL); if (fd) { fd->rec_cpu_num = -1; /* no cpu affinity by default */ fd->mm = current->mm; - atomic_inc(&fd->mm->mm_count); + mmgrab(fd->mm); fp->private_data = fd; } else { fp->private_data = NULL; if (atomic_dec_and_test(&dd->user_refcount)) complete(&dd->user_comp); return -ENOMEM; } return 0; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8841
--- initial +++ final @@ -1,9 +1,9 @@ static inline void futex_get_mm(union futex_key *key) { - atomic_inc(&key->private.mm->mm_count); + mmgrab(key->private.mm); /* * Ensure futex_get_mm() implies a full barrier such that * get_futex_key() implies a full barrier. This is relied upon * as smp_mb(); (B), see the ordering comment above. */ smp_mb__after_atomic(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8842
--- initial +++ final @@ -1,20 +1,20 @@ void start_secondary(void) { unsigned int cpu; unsigned long thread_ptr; /* Calculate thread_info pointer from stack pointer */ __asm__ __volatile__("%0 = SP;\n" : "=r"(thread_ptr)); thread_ptr = thread_ptr & ~(THREAD_SIZE - 1); __asm__ __volatile__(QUOTED_THREADINFO_REG " = %0;\n" : : "r"(thread_ptr)); /* Set the memory struct */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; cpu = smp_processor_id(); setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc); /* Register the clock_event dummy */ setup_percpu_clockdev(); printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); notify_cpu_starting(cpu); set_cpu_online(cpu, true); local_irq_enable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8843
--- initial +++ final @@ -1,36 +1,36 @@ static int i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_mm_struct *mm; int ret = 0; /* During release of the GEM object we hold the struct_mutex. This * precludes us from calling mmput() at that time as that may be * the last reference and so call exit_mmap(). exit_mmap() will * attempt to reap the vma, and if we were holding a GTT mmap * would then call drm_gem_vm_close() and attempt to reacquire * the struct mutex. So in order to avoid that recursion, we have * to defer releasing the mm reference until after we drop the * struct_mutex, i.e. we need to schedule a worker to do the clean * up. */ mutex_lock(&dev_priv->mm_lock); mm = __i915_mm_struct_find(dev_priv, current->mm); if (mm == NULL) { mm = kmalloc(sizeof(*mm), GFP_KERNEL); if (mm == NULL) { ret = -ENOMEM; goto out; } kref_init(&mm->kref); mm->i915 = to_i915(obj->base.dev); mm->mm = current->mm; - atomic_inc(&current->mm->mm_count); + mmgrab(current->mm); mm->mn = NULL; /* Protected by dev_priv->mm_lock */ hash_add(dev_priv->mm_structs, &mm->node, (unsigned long)mm->mm); } else kref_get(&mm->kref); obj->userptr.mm = mm; out: mutex_unlock(&dev_priv->mm_lock); return ret; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8844
--- initial +++ final @@ -1,113 +1,113 @@ void cpu_init(void) { extern void ia64_mmu_init(void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; struct cpuinfo_ia64 *cpu_info; void *cpu_data; cpu_data = per_cpu_init(); #ifdef CONFIG_SMP /* * insert boot cpu into sibling and core mapes * (must be done after per_cpu area is setup) */ if (smp_processor_id() == 0) { cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); cpumask_set_cpu(0, &cpu_core_map[0]); } else { /* * Set ar.k3 so that assembly code in MCA handler can compute * physical addresses of per cpu variables with a simple: * phys = ar.k3 + &per_cpu_var * and the alt-dtlb-miss handler can set per-cpu mapping into * the TLB when needed. head.S already did this for cpu0. */ ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_tpa(cpu_data) - (long)__per_cpu_start); } #endif get_cache_info(); /* * We can't pass "local_cpu_data" to identify_cpu() because we haven't called * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it * depends on the data returned by identify_cpu(). We break the dependency by * accessing cpu_data() through the canonical per-CPU address. */ cpu_info = cpu_data + ((char *)&__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); identify_cpu(cpu_info); #ifdef CONFIG_MCKINLEY { #define FEATURE_SET 16 struct ia64_pal_retval iprv; if (cpu_info->family == 0x1f) { PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, (iprv.v1 | 0x80), FEATURE_SET, 0); } } #endif /* Clear the stack memory reserved for pt_regs: */ memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); ia64_set_kr(IA64_KR_FPU_OWNER, 0); /* * Initialize the page-table base register to a global * directory with all zeroes. This ensure that we can handle * TLB-misses to user address-space even before we created the * first user address-space. This may happen, e.g., due to * aggressive use of lfetch.fault. */ ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); /* * Initialize default control register to defer speculative faults except * for those arising from TLB misses, which are not deferred. The * kernel MUST NOT depend on a particular setting of these bits (in other words, * the kernel must have recovery code for all speculative accesses). Turn on * dcr.lc as per recommendation by the architecture team. Most IA-32 apps * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). */ ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); /* Clear ITC to eliminate sched_clock() overflows in human time. 
*/ ia64_set_itc(0); /* disable all local interrupt sources: */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* clear TPR & XTP to enable all interrupt classes: */ ia64_setreg(_IA64_REG_CR_TPR, 0); /* Clear any pending interrupts left by SAL/EFI */ while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); #ifdef CONFIG_SMP normal_xtp(); #endif /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ if (ia64_pal_vm_summary(NULL, &vmi) == 0) { max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); } else { printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); max_ctx = (1U << 15) - 1; /* use architected minimum */ } while (max_ctx < ia64_ctx.max_ctx) { unsigned int old = ia64_ctx.max_ctx; if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) break; } if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " "stacked regs\n"); num_phys_stacked = 96; } /* size of physical stacked register partition plus 8 bytes: */ if (num_phys_stacked > max_num_phys_stacked) { ia64_patch_phys_stack_reg(num_phys_stacked * 8 + 8); max_num_phys_stacked = num_phys_stacked; } platform_cpu_init(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8845
--- initial +++ final @@ -1,21 +1,21 @@ asmlinkage void start_secondary(void) { unsigned int cpu = smp_processor_id(); struct mm_struct *mm = &init_mm; enable_mmu(); - atomic_inc(&mm->mm_count); + mmgrab(mm); atomic_inc(&mm->mm_users); current->active_mm = mm; #ifdef CONFIG_MMU enter_lazy_tlb(mm, current); local_flush_tlb_all(); #endif per_cpu_trap_init(); preempt_disable(); notify_cpu_starting(cpu); local_irq_enable(); calibrate_delay(); smp_store_cpu_info(cpu); set_cpu_online(cpu, true); per_cpu(cpu_state, cpu) = CPU_ONLINE; cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8846
--- initial +++ final @@ -1,49 +1,49 @@ void per_cpu_trap_init(bool is_boot_cpu) { unsigned int cpu = smp_processor_id(); configure_status(); configure_hwrena(); configure_exception_vector(); /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: * * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt * o read IntCtl.IPFDC to determine the fast debug channel interrupt */ if (cpu_has_mips_r2_r6) { /* * We shouldn't trust a secondary core has a sane EBASE register * so use the one calculated by the boot CPU. */ if (!is_boot_cpu) { /* If available, use WG to set top bits of EBASE */ if (cpu_has_ebase_wg) { #ifdef CONFIG_64BIT write_c0_ebase_64(ebase | MIPS_EBASE_WG); #else write_c0_ebase(ebase | MIPS_EBASE_WG); #endif } write_c0_ebase(ebase); } cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; if (!cp0_fdc_irq) cp0_fdc_irq = -1; } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; cp0_perfcount_irq = -1; cp0_fdc_irq = -1; } if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = asid_first_version(cpu); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); /* Boot CPU's cache setup in setup_arch(). */ if (!is_boot_cpu) cpu_cache_init(); tlb_init(); TLBMISS_HANDLER_SETUP(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8847
--- initial +++ final @@ -1,37 +1,37 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct kfd_process *p; struct kfd_process_device *pdd = NULL; /* * The kfd_process structure can not be free because the * mmu_notifier srcu is read locked */ p = container_of(mn, struct kfd_process, mmu_notifier); BUG_ON(p->mm != mm); mutex_lock(&kfd_processes_mutex); hash_del_rcu(&p->kfd_processes); mutex_unlock(&kfd_processes_mutex); synchronize_srcu(&kfd_processes_srcu); mutex_lock(&p->mutex); /* In case our notifier is called before IOMMU notifier */ pqm_uninit(&p->pqm); /* Iterate over all process device data structure and check * if we should delete debug managers and reset all wavefronts */ list_for_each_entry(pdd, &p->per_device_data, per_device_list) { if ((pdd->dev->dbgmgr) && (pdd->dev->dbgmgr->pasid == p->pasid)) kfd_dbgmgr_destroy(pdd->dev->dbgmgr); if (pdd->reset_wavefronts) { pr_warn("amdkfd: Resetting all wave fronts\n"); dbgdev_wave_reset_wavefronts(pdd->dev, p); pdd->reset_wavefronts = false; } } mutex_unlock(&p->mutex); /* * Because we drop mm_count inside kfd_process_destroy_delayed * and because the mmu_notifier_unregister function also drop * mm_count we need to take an extra count here. */ - atomic_inc(&p->mm->mm_count); + mmgrab(p->mm); mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm); mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8848
--- initial +++ final @@ -1,24 +1,24 @@ int __khugepaged_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int wakeup; mm_slot = alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; } spin_lock(&khugepaged_mm_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little. */ wakeup = list_empty(&khugepaged_scan.mm_head); list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); spin_unlock(&khugepaged_mm_lock); - atomic_inc(&mm->mm_count); + mmgrab(mm); if (wakeup) wake_up_interruptible(&khugepaged_wait); return 0; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8849
--- initial +++ final @@ -1,29 +1,29 @@ int __ksm_enter(struct mm_struct *mm) { struct mm_slot *mm_slot; int needs_wakeup; mm_slot = alloc_mm_slot(); if (!mm_slot) return -ENOMEM; /* Check ksm_run too? Would need tighter locking */ needs_wakeup = list_empty(&ksm_mm_head.mm_list); spin_lock(&ksm_mmlist_lock); insert_to_mm_slots_hash(mm, mm_slot); /* * When KSM_RUN_MERGE (or KSM_RUN_STOP), * insert just behind the scanning cursor, to let the area settle * down a little; when fork is followed by immediate exec, we don't * want ksmd to waste time setting up and tearing down an rmap_list. * * But when KSM_RUN_UNMERGE, it's important to insert ahead of its * scanning cursor, otherwise KSM pages in newly forked mms will be * missed: then we might as well insert at the end of the list. */ if (ksm_run & KSM_RUN_UNMERGE) list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); else list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); - atomic_inc(&mm->mm_count); + mmgrab(mm); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); return 0; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8850
--- initial +++ final @@ -1,61 +1,61 @@ static struct kvm *kvm_create_vm(unsigned long type) { int r, i; struct kvm *kvm = kvm_arch_alloc_vm(); if (!kvm) return ERR_PTR(-ENOMEM); spin_lock_init(&kvm->mmu_lock); - atomic_inc(&current->mm->mm_count); + mmgrab(current->mm); kvm->mm = current->mm; kvm_eventfd_init(kvm); mutex_init(&kvm->lock); mutex_init(&kvm->irq_lock); mutex_init(&kvm->slots_lock); atomic_set(&kvm->users_count, 1); INIT_LIST_HEAD(&kvm->devices); r = kvm_arch_init_vm(kvm, type); if (r) goto out_err_no_disable; r = hardware_enable_all(); if (r) goto out_err_no_disable; #ifdef CONFIG_HAVE_KVM_IRQFD INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); r = -ENOMEM; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { struct kvm_memslots *slots = kvm_alloc_memslots(); if (!slots) goto out_err_no_srcu; /* * Generations must be different for each address space. * Init kvm generation close to the maximum to easily test the * code of handling generation number wrap-around. */ slots->generation = i * 2 - 150; rcu_assign_pointer(kvm->memslots[i], slots); } if (init_srcu_struct(&kvm->srcu)) goto out_err_no_srcu; if (init_srcu_struct(&kvm->irq_srcu)) goto out_err_no_irq_srcu; for (i = 0; i < KVM_NR_BUSES; i++) { kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL); if (!kvm->buses[i]) goto out_err; } r = kvm_init_mmu_notifier(kvm); if (r) goto out_err; spin_lock(&kvm_lock); list_add(&kvm->vm_list, &vm_list); spin_unlock(&kvm_lock); preempt_notifier_inc(); return kvm; out_err: cleanup_srcu_struct(&kvm->irq_srcu); out_err_no_irq_srcu: cleanup_srcu_struct(&kvm->srcu); out_err_no_srcu: hardware_disable_all(); out_err_no_disable: for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm->buses[i]); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) kvm_free_memslots(kvm, kvm->memslots[i]); kvm_arch_free_vm(kvm); mmdrop(current->mm); return ERR_PTR(r); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8851
--- initial +++ final @@ -1,18 +1,18 @@ void leon_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); /* Allow master to continue. The master will then give us the * go-ahead by setting the smp_commenced_mask and will wait without * timeouts until our setup is completed fully (signified by * our bit being set in the cpu_online_mask). */ do_swap(&cpu_callin_map[cpuid], 1); local_ops->cache_all(); local_ops->tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8852
--- initial +++ final @@ -1,41 +1,41 @@ void secondary_start_kernel(void) { unsigned int cpu = smp_processor_id(); struct mm_struct *mm = &init_mm; if (_bfin_swrst & SWRST_DBL_FAULT_B) { printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n"); #ifdef CONFIG_DEBUG_DOUBLEFAULT printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n", initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE, initial_pda_coreb.retx_doublefault); printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", initial_pda_coreb.dcplb_doublefault_addr); printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", initial_pda_coreb.icplb_doublefault_addr); #endif printk(KERN_NOTICE " The instruction at %pF caused a double exception\n", initial_pda_coreb.retx); } /* * We want the D-cache to be enabled early, in case the atomic * support code emulates cache coherence (see * __ARCH_SYNC_CORE_DCACHE). */ init_exception_vectors(); local_irq_disable(); /* Attach the new idle task to the global mm. */ atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; preempt_disable(); setup_secondary(cpu); platform_secondary_init(cpu); /* setup local core timer */ bfin_local_timer_setup(); local_irq_enable(); bfin_setup_caches(cpu); notify_cpu_starting(cpu); /* * Calibrate loops per jiffy value. * IRQs need to be enabled here - D-cache can be invalidated * in timer irq handler, so core B can read correct jiffies. */ calibrate_delay(); /* We are done with local CPU inits, unblock the boot CPU. */ set_cpu_online(cpu, true); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8853
--- initial +++ final @@ -1,48 +1,48 @@ asmlinkage void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); enter_lazy_tlb(mm, current); local_flush_tlb_all(); /* * TODO: Some day it might be useful for each Linux CPU to * have its own TBI structure. That would allow each Linux CPU * to run different interrupt handlers for the same IRQ * number. * * For now, simply copying the pointer to the boot CPU's TBI * structure is sufficient because we always want to run the * same interrupt handler whatever CPU takes the interrupt. */ per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); if (!per_cpu(pTBI, cpu)) panic("No TBI found!"); per_cpu_trap_init(cpu); irq_ctx_init(cpu); preempt_disable(); setup_priv(); notify_cpu_starting(cpu); pr_info("CPU%u (thread %u): Booted secondary processor\n", cpu, cpu_2_hwthread_id[cpu]); calibrate_delay(); smp_store_cpu_info(cpu); /* * OK, now it's safe to let the boot CPU continue */ set_cpu_online(cpu, true); complete(&cpu_running); /* * Enable local interrupts. */ tbi_startup_interrupt(TBID_SIGNUM_TRT); local_irq_enable(); /* * OK, it's off to the idle thread for us */ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8854
--- initial +++ final @@ -1,17 +1,17 @@ void use_mm(struct mm_struct *mm) { struct mm_struct *active_mm; struct task_struct *tsk = current; task_lock(tsk); active_mm = tsk->active_mm; if (active_mm != mm) { - atomic_inc(&mm->mm_count); + mmgrab(mm); tsk->active_mm = mm; } tsk->mm = mm; switch_mm(active_mm, mm, tsk); task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); #endif if (active_mm != mm) mmdrop(active_mm); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8855
--- initial +++ final @@ -1,41 +1,41 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm, int take_mmap_sem) { struct mmu_notifier_mm *mmu_notifier_mm; int ret; BUG_ON(atomic_read(&mm->mm_users) <= 0); /* * Verify that mmu_notifier_init() already run and the global srcu is * initialized. */ BUG_ON(!srcu.per_cpu_ref); ret = -ENOMEM; mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); if (unlikely(!mmu_notifier_mm)) goto out; if (take_mmap_sem) down_write(&mm->mmap_sem); ret = mm_take_all_locks(mm); if (unlikely(ret)) goto out_clean; if (!mm_has_notifiers(mm)) { INIT_HLIST_HEAD(&mmu_notifier_mm->list); spin_lock_init(&mmu_notifier_mm->lock); mm->mmu_notifier_mm = mmu_notifier_mm; mmu_notifier_mm = NULL; } - atomic_inc(&mm->mm_count); + mmgrab(mm); /* * Serialize the update against mmu_notifier_unregister. A * side note: mmu_notifier_release can't run concurrently with * us because we hold the mm_users pin (either implicitly as * current->mm or explicitly with get_task_mm() or similar). * We can't race against any other mmu notifier method either * thanks to mm_take_all_locks(). */ spin_lock(&mm->mmu_notifier_mm->lock); hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list); spin_unlock(&mm->mmu_notifier_mm->lock); mm_drop_all_locks(mm); out_clean: if (take_mmap_sem) up_write(&mm->mmap_sem); kfree(mmu_notifier_mm); out: BUG_ON(atomic_read(&mm->mm_users) <= 0); return ret; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8856
--- initial +++ final @@ -1,33 +1,33 @@ static void __init smp_cpu_init(void) { unsigned long flags; int cpu_id = smp_processor_id(); u16 tmp16; if (test_and_set_bit(cpu_id, &cpu_initialized)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id); for (;;) local_irq_enable(); } printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); /* Force FPU initialization */ clear_using_fpu(current); GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(LOCAL_TIMER_IPI); GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(RESCHEDULE_IPI); #ifdef CONFIG_MN10300_CACHE_ENABLED GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT; mn10300_ipi_enable(FLUSH_CACHE_IPI); #endif mn10300_ipi_shutdown(SMP_BOOT_IRQ); /* Set up the non-maskable call function IPI */ flags = arch_local_cli_save(); GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); arch_local_irq_restore(flags); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8857
--- initial +++ final @@ -1,16 +1,16 @@ static void mark_oom_victim(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. */ - if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) atomic_inc(&tsk->signal->oom_mm->mm_count); + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) mmgrab(tsk->signal->oom_mm); /* * Make sure that the task is woken up from uninterruptible sleep * if it is frozen because OOM killer wouldn't be able to free * any memory and livelock. freezing_slow_path will tell the freezer * that TIF_MEMDIE tasks should be ignored. */ __thaw_task(tsk); atomic_inc(&oom_victims); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8858
--- initial +++ final @@ -1,101 +1,101 @@ static void oom_kill_process(struct oom_control *oc, const char *message) { struct task_struct *p = oc->chosen; unsigned int points = oc->chosen_points; struct task_struct *victim = p; struct task_struct *child; struct task_struct *t; struct mm_struct *mm; unsigned int victim_points = 0; static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); bool can_oom_reap = true; /* * If the task is already exiting, don't alarm the sysadmin or kill * its children or threads, just set TIF_MEMDIE so it can die quickly */ task_lock(p); if (task_will_free_mem(p)) { mark_oom_victim(p); wake_oom_reaper(p); task_unlock(p); put_task_struct(p); return; } task_unlock(p); if (__ratelimit(&oom_rs)) dump_header(oc, p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", message, task_pid_nr(p), p->comm, points); /* * If any of p's children has a different mm and is eligible for kill, * the one with the highest oom_badness() score is sacrificed for its * parent. This attempts to lose the minimal amount of work done while * still freeing memory. */ read_lock(&tasklist_lock); for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; if (process_shares_mm(child, p->mm)) continue; /* * oom_badness() returns 0 if the thread is unkillable */ child_points = oom_badness(child, oc->memcg, oc->nodemask, oc->totalpages); if (child_points > victim_points) { put_task_struct(victim); victim = child; victim_points = child_points; get_task_struct(victim); } } } read_unlock(&tasklist_lock); p = find_lock_task_mm(victim); if (!p) { put_task_struct(victim); return; } else if (victim != p) { get_task_struct(p); put_task_struct(victim); victim = p; } /* Get a reference to safely compare mm after task_unlock(victim) */ mm = victim->mm; - atomic_inc(&mm->mm_count); + mmgrab(mm); /* * We should send SIGKILL before setting TIF_MEMDIE in order to prevent * the OOM victim from depleting the memory reserves from the user * space under its control. */ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); mark_oom_victim(victim); pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), K(get_mm_counter(victim->mm, MM_ANONPAGES)), K(get_mm_counter(victim->mm, MM_FILEPAGES)), K(get_mm_counter(victim->mm, MM_SHMEMPAGES))); task_unlock(victim); /* * Kill all user processes sharing victim->mm in other thread groups, if * any. They don't get access to memory reserves, though, to avoid * depletion of all memory. This prevents mm->mmap_sem livelock when an * oom killed thread cannot exit because it requires the semaphore and * its contended by another thread trying to allocate memory itself. * That thread will now get access to memory reserves since it has a * pending fatal signal. */ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(p, victim)) continue; if (is_global_init(p)) { can_oom_reap = false; set_bit(MMF_OOM_SKIP, &mm->flags); pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", task_pid_nr(victim), victim->comm, task_pid_nr(p), p->comm); continue; } /* * No use_mm() user needs to read from the userspace so we are * ok to reap it. 
*/ if (unlikely(p->flags & PF_KTHREAD)) continue; do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); } rcu_read_unlock(); if (can_oom_reap) wake_oom_reaper(victim); mmdrop(mm); put_task_struct(victim); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8859
--- initial +++ final @@ -1,24 +1,24 @@ static void __init smp_cpu_init(int cpunum) { extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ /* Set modes and Enable floating point coprocessor */ (void)init_per_cpu(cpunum); disable_sr_hashing(); mb(); /* Well, support 2.4 linux scheme as well. */ if (cpu_online(cpunum)) { extern void machine_halt(void); /* arch/parisc.../process.c */ printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); } notify_cpu_starting(cpunum); set_cpu_online(cpunum, true); /* Initialise the idle task for this CPU */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ start_cpu_itimer(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8860
--- initial +++ final @@ -1,39 +1,39 @@ void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); int i, base; - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; smp_store_cpu_info(cpu); set_dec(tb_ticks_per_jiffy); preempt_disable(); cpu_callin_map[cpu] = 1; if (smp_ops->setup_cpu) smp_ops->setup_cpu(cpu); if (smp_ops->take_timebase) smp_ops->take_timebase(); secondary_cpu_time_init(); #ifdef CONFIG_PPC64 if (system_state == SYSTEM_RUNNING) vdso_data->processorCount++; vdso_getcpu_init(); #endif /* Update sibling maps */ base = cpu_first_thread_sibling(cpu); for (i = 0; i < threads_per_core; i++) { if (cpu_is_offline(base + i) && (cpu != base + i)) continue; cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); /* cpu_core_map should be a superset of * cpu_sibling_map even if we don't have cache * information, so update the former here, too. */ cpumask_set_cpu(cpu, cpu_core_mask(base + i)); cpumask_set_cpu(base + i, cpu_core_mask(cpu)); } traverse_core_siblings(cpu, true); set_numa_node(numa_cpu_lookup_table[cpu]); set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); smp_wmb(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); local_irq_enable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); BUG(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8861
--- initial +++ final @@ -1,9 +1,9 @@ void cpu_init(void) { struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id); get_cpu_id(id); if (machine_has_cpu_mhz) update_cpu_mhz(NULL); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8862
--- initial +++ final @@ -1,22 +1,22 @@ void __init cpu_init(void) { int cpu_id = smp_processor_id(); if (test_and_set_bit(cpu_id, &cpu_initialized)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id); for (;;) local_irq_enable(); } printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); /* Set up and load the per-CPU TSS and LDT */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; if (current->mm) BUG(); /* Force FPU initialization */ current_thread_info()->status = 0; clear_used_math(); #ifdef CONFIG_MMU /* Set up MMU */ init_mmu(); #endif /* Set up ICUIMASK */ outl(0x00070000, M32R_ICU_IMASK_PORTL); /* imask=111 */ }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8863
--- initial +++ final @@ -1,30 +1,30 @@ void smp_callin(void) { int cpuid = hard_smp_processor_id(); __local_per_cpu_offset = __per_cpu_offset(cpuid); if (tlb_type == hypervisor) sun4v_ktsb_register(); __flush_tlb_all(); setup_sparc64_timer(); if (cheetah_pcache_forced_on) cheetah_enable_pcache(); callin_flag = 1; __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); /* Clear this or we will die instantly when we * schedule back to this idler... */ current_thread_info()->new_child = 0; /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ notify_cpu_starting(cpuid); while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) rmb(); set_cpu_online(cpuid, true); /* idle thread is expected to have preempt disabled */ preempt_disable(); local_irq_enable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8864
--- initial +++ final @@ -1,31 +1,31 @@ void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); init_mmu(); #ifdef CONFIG_DEBUG_KERNEL if (boot_secondary_processors == 0) { pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n", __func__, boot_secondary_processors, cpu); for (;;) __asm__ __volatile__("waiti " __stringify(LOCKLEVEL)); } pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n", __func__, boot_secondary_processors, cpu); #endif /* Init EXCSAVE1 */ secondary_trap_init(); /* All kernel threads share the same mm context. */ atomic_inc(&mm->mm_users); - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); enter_lazy_tlb(mm, current); preempt_disable(); trace_hardirqs_off(); calibrate_delay(); notify_cpu_starting(cpu); secondary_init_irq(); local_timer_setup(cpu); set_cpu_online(cpu, true); local_irq_enable(); complete(&cpu_running); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8865
--- initial +++ final @@ -1,30 +1,30 @@ static void start_secondary(void) { int cpuid; preempt_disable(); cpuid = smp_processor_id(); /* Set our thread pointer appropriately. */ set_my_cpu_offset(__per_cpu_offset[cpuid]); /* * In large machines even this will slow us down, since we * will be contending for for the printk spinlock. */ /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ /* Initialize the current asid for our first page table. */ __this_cpu_write(current_asid, min_asid); /* Set up this thread as another owner of the init_mm */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; if (current->mm) BUG(); enter_lazy_tlb(&init_mm, current); /* Allow hypervisor messages to be received */ init_messaging(); local_irq_enable(); /* Indicate that we're ready to come up. */ /* Must not do this before we're ready to receive messages */ if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { pr_warn("CPU#%d already started!\n", cpuid); for (;;) local_irq_enable(); } smp_nap(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8866
--- initial +++ final @@ -1,31 +1,31 @@ void sun4d_cpu_pre_online(void *arg) { unsigned long flags; int cpuid; cpuid = hard_smp_processor_id(); /* Unblock the master CPU _only_ when the scheduler state * of all secondary CPUs will be up-to-date, so after * the SMP initialization the master will be just allowed * to call the scheduler code. */ sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); local_ops->cache_all(); local_ops->tlb_all(); while ((unsigned long)current_set[cpuid] < PAGE_OFFSET) barrier(); while (current_set[cpuid]->cpu != cpuid) barrier(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) : "memory" /* paranoid */); cpu_leds[cpuid] = 0x9; show_leds(cpuid); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; local_ops->cache_all(); local_ops->tlb_all(); while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) barrier(); spin_lock_irqsave(&sun4d_imsk_lock, flags); cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ spin_unlock_irqrestore(&sun4d_imsk_lock, flags); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8867
--- initial +++ final @@ -1,19 +1,19 @@ void sun4m_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); /* Allow master to continue. The master will then give us the * go-ahead by setting the smp_commenced_mask and will wait without * timeouts until our setup is completed fully (signified by * our bit being set in the cpu_online_mask). */ swap_ulong(&cpu_callin_map[cpuid], 1); /* XXX: What's up with all the flushes? */ local_ops->cache_all(); local_ops->tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8868
--- initial +++ final @@ -1,11 +1,11 @@ void trap_init(void) { extern void thread_info_offsets_are_bolixed_pete(void); /* Force linker to barf if mismatched */ if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) || TI_TASK != offsetof(struct thread_info, task) || TI_FLAGS != offsetof(struct thread_info, flags) || TI_CPU != offsetof(struct thread_info, cpu) || TI_PREEMPT != offsetof(struct thread_info, preempt_count) || TI_SOFTIRQ != offsetof(struct thread_info, softirq_count) || TI_HARDIRQ != offsetof(struct thread_info, hardirq_count) || TI_KSP != offsetof(struct thread_info, ksp) || TI_KPC != offsetof(struct thread_info, kpc) || TI_KPSR != offsetof(struct thread_info, kpsr) || TI_KWIM != offsetof(struct thread_info, kwim) || TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || TI_W_SAVED != offsetof(struct thread_info, w_saved)) thread_info_offsets_are_bolixed_pete(); /* Attach to the address space of init_task. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; /* NOTE: Other cpus have this done as they are started * up on SMP. */ }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8869
--- initial +++ final @@ -1,11 +1,11 @@ void __init trap_init(void) { /* Compile time sanity check. */ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || TI_FLAGS != offsetof(struct thread_info, flags) || TI_CPU != offsetof(struct thread_info, cpu) || TI_FPSAVED != offsetof(struct thread_info, fpsaved) || TI_KSP != offsetof(struct thread_info, ksp) || TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) || TI_KREGS != offsetof(struct thread_info, kregs) || TI_UTRAPS != offsetof(struct thread_info, utraps) || TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || TI_GSR != offsetof(struct thread_info, gsr) || TI_XFSR != offsetof(struct thread_info, xfsr) || TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_NEW_CHILD != offsetof(struct thread_info, new_child) || TI_CURRENT_DS != offsetof(struct thread_info, current_ds) || TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || TI_FPREGS != offsetof(struct thread_info, fpregs) || (TI_FPREGS & (64 - 1))); BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || (TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr)) || (TRAP_PER_CPU_CPU_MONDO_PA != offsetof(struct trap_per_cpu, cpu_mondo_pa)) || (TRAP_PER_CPU_DEV_MONDO_PA != offsetof(struct trap_per_cpu, dev_mondo_pa)) || (TRAP_PER_CPU_RESUM_MONDO_PA != offsetof(struct trap_per_cpu, resum_mondo_pa)) || (TRAP_PER_CPU_RESUM_KBUF_PA != offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || (TRAP_PER_CPU_NONRESUM_MONDO_PA != offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || (TRAP_PER_CPU_NONRESUM_KBUF_PA != offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || (TRAP_PER_CPU_FAULT_INFO != offsetof(struct trap_per_cpu, fault_info)) || (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || (TRAP_PER_CPU_CPU_LIST_PA != offsetof(struct trap_per_cpu, cpu_list_pa)) || (TRAP_PER_CPU_TSB_HUGE != offsetof(struct trap_per_cpu, tsb_huge)) || (TRAP_PER_CPU_TSB_HUGE_TEMP != offsetof(struct trap_per_cpu, tsb_huge_temp)) || (TRAP_PER_CPU_IRQ_WORKLIST_PA != offsetof(struct trap_per_cpu, irq_worklist_pa)) || (TRAP_PER_CPU_CPU_MONDO_QMASK != offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || (TRAP_PER_CPU_DEV_MONDO_QMASK != offsetof(struct trap_per_cpu, dev_mondo_qmask)) || (TRAP_PER_CPU_RESUM_QMASK != offsetof(struct trap_per_cpu, resum_qmask)) || (TRAP_PER_CPU_NONRESUM_QMASK != offsetof(struct trap_per_cpu, nonresum_qmask)) || (TRAP_PER_CPU_PER_CPU_BASE != offsetof(struct trap_per_cpu, __per_cpu_base))); BUILD_BUG_ON((TSB_CONFIG_TSB != offsetof(struct tsb_config, tsb)) || (TSB_CONFIG_RSS_LIMIT != offsetof(struct tsb_config, tsb_rss_limit)) || (TSB_CONFIG_NENTRIES != offsetof(struct tsb_config, tsb_nentries)) || (TSB_CONFIG_REG_VAL != offsetof(struct tsb_config, tsb_reg_val)) || (TSB_CONFIG_MAP_VADDR != offsetof(struct tsb_config, tsb_map_vaddr)) || (TSB_CONFIG_MAP_PTE != offsetof(struct tsb_config, tsb_map_pte))); /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus. */ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8870
--- initial +++ final @@ -1,35 +1,35 @@ void __init trap_init(void) { int i; pgd_current = (unsigned long)init_mm.pgd; /* DEBUG EXCEPTION */ memcpy((void *)DEBUG_VECTOR_BASE_ADDR, &debug_exception_vector, DEBUG_VECTOR_SIZE); /* NMI EXCEPTION */ memcpy((void *)GENERAL_VECTOR_BASE_ADDR, &general_exception_vector, GENERAL_VECTOR_SIZE); /* * Initialise exception handlers */ for (i = 0; i <= 31; i++) set_except_vector(i, handle_reserved); set_except_vector(1, handle_nmi); set_except_vector(2, handle_adelinsn); set_except_vector(3, handle_tlb_refill); set_except_vector(4, handle_tlb_invaild); set_except_vector(5, handle_ibe); set_except_vector(6, handle_pel); set_except_vector(7, handle_sys); set_except_vector(8, handle_ccu); set_except_vector(9, handle_ri); set_except_vector(10, handle_tr); set_except_vector(11, handle_adedata); set_except_vector(12, handle_adedata); set_except_vector(13, handle_tlb_refill); set_except_vector(14, handle_tlb_invaild); set_except_vector(15, handle_mod); set_except_vector(16, handle_cee); set_except_vector(17, handle_cpe); set_except_vector(18, handle_dbe); flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR); - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); current->active_mm = &init_mm; cpu_cache_init(); }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm.mm_count); + mmgrab(&init_mm); <|end_of_text|>
8871
--- initial +++ final @@ -1,36 +1,36 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) { struct userfaultfd_ctx *ctx = NULL, *octx; struct userfaultfd_fork_ctx *fctx; octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); return 0; } list_for_each_entry(fctx, fcs, list) if (fctx->orig == octx) { ctx = fctx->new; break; } if (!ctx) { fctx = kmalloc(sizeof(*fctx), GFP_KERNEL); if (!fctx) return -ENOMEM; ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); if (!ctx) { kfree(fctx); return -ENOMEM; } atomic_set(&ctx->refcount, 1); ctx->flags = octx->flags; ctx->state = UFFD_STATE_RUNNING; ctx->features = octx->features; ctx->released = false; ctx->mm = vma->vm_mm; - atomic_inc(&ctx->mm->mm_count); + mmgrab(ctx->mm); userfaultfd_ctx_get(octx); fctx->orig = octx; fctx->new = ctx; list_add_tail(&fctx->list, fcs); } vma->vm_userfaultfd_ctx.ctx = ctx; return 0; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8872
--- initial +++ final @@ -1,28 +1,28 @@ static struct file *userfaultfd_file_create(int flags) { struct file *file; struct userfaultfd_ctx *ctx; BUG_ON(!current->mm); /* Check the UFFD_* constants for consistency. */ BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); file = ERR_PTR(-EINVAL); if (flags & ~UFFD_SHARED_FCNTL_FLAGS) goto out; file = ERR_PTR(-ENOMEM); ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); if (!ctx) goto out; atomic_set(&ctx->refcount, 1); ctx->flags = flags; ctx->features = 0; ctx->state = UFFD_STATE_WAIT_API; ctx->released = false; ctx->mm = current->mm; /* prevent the mm struct to be freed */ - atomic_inc(&ctx->mm->mm_count); + mmgrab(ctx->mm); file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); if (IS_ERR(file)) { mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); } out: return file; }<sep>@@ expression init_mm; @@ - atomic_inc(&init_mm->mm_count); + mmgrab(init_mm); <|end_of_text|>
8873
--- initial +++ final @@ -1,20 +1,18 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) { struct drm_encoder *encoder; struct drm_bridge *bridge; int ret = 0; encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; /* Locate drm bridge from the hdmi encoder DT node */ bridge = of_drm_find_bridge(np); if (!bridge) return -EPROBE_DEFER; encoder->possible_crtcs = 1; encoder->possible_clones = 0; ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret) return ret; /* Link drm_bridge to encoder */ - bridge->encoder = encoder; - encoder->bridge = bridge; - ret = drm_bridge_attach(drm, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) drm_encoder_cleanup(encoder); return ret; }<sep>@@ expression br,enc,ret; @@ - br->encoder = enc; ... - enc->bridge = br; ... - ret = drm_bridge_attach(drm, br); + ret = drm_bridge_attach(enc, br, NULL); <|end_of_text|>
8958
--- initial +++ final @@ -1,31 +1,29 @@ static int imx_pd_register(struct drm_device *drm, struct imx_parallel_display *imxpd) { struct drm_encoder *encoder = &imxpd->encoder; int ret; ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node); if (ret) return ret; /* set the connector's dpms to OFF so that * drm_helper_connector_dpms() won't return * immediately since the current state is ON * at this point. */ imxpd->connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs); drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); if (!imxpd->bridge) { drm_connector_helper_add(&imxpd->connector, &imx_pd_connector_helper_funcs); drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs, DRM_MODE_CONNECTOR_VGA); } if (imxpd->panel) drm_panel_attach(imxpd->panel, &imxpd->connector); if (imxpd->bridge) { - imxpd->bridge->encoder = encoder; - encoder->bridge = imxpd->bridge; - ret = drm_bridge_attach(drm, imxpd->bridge); + ret = drm_bridge_attach(encoder, imxpd->bridge, NULL); if (ret < 0) { dev_err(imxpd->dev, "failed to attach bridge: %d\n", ret); return ret; } } else { drm_mode_connector_attach_encoder(&imxpd->connector, encoder); } return 0; }<sep>@@ expression br,enc,ret; @@ - br->encoder = enc; ... - enc->bridge = br; ... - ret = drm_bridge_attach(drm, br); + ret = drm_bridge_attach(enc, br, NULL); <|end_of_text|>
8959
--- initial +++ final @@ -1,26 +1,26 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) { struct amdgpu_fbdev *rfbdev; int bpp_sel = 32; int ret; /* don't init fbdev on hw without DCE */ if (!adev->mode_info.mode_config_initialized) return 0; /* don't init fbdev if there are no connectors */ if (list_empty(&adev->ddev->mode_config.connector_list)) return 0; /* select 8 bpp console on low vram cards */ if (adev->mc.real_vram_size <= (32 * 1024 * 1024)) bpp_sel = 8; rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); if (!rfbdev) return -ENOMEM; rfbdev->adev = adev; adev->mode_info.rfbdev = rfbdev; drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, &amdgpu_fb_helper_funcs); - ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, adev->mode_info.num_crtc, AMDGPUFB_CONN_LIMIT); + ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, AMDGPUFB_CONN_LIMIT); if (ret) { kfree(rfbdev); return ret; } drm_fb_helper_single_add_all_connectors(&rfbdev->helper); /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(adev->ddev); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8960
--- initial +++ final @@ -1,42 +1,42 @@ static int arcpgu_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct arcpgu_drm_private *arcpgu; struct device_node *encoder_node; struct resource *res; int ret; arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL); if (arcpgu == NULL) return -ENOMEM; drm->dev_private = arcpgu; arcpgu->clk = devm_clk_get(drm->dev, "pxlclk"); if (IS_ERR(arcpgu->clk)) return PTR_ERR(arcpgu->clk); arcpgu_setup_mode_config(drm); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); arcpgu->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(arcpgu->regs)) return PTR_ERR(arcpgu->regs); dev_info(drm->dev, "arc_pgu ID: 0x%x\n", arc_pgu_read(arcpgu, ARCPGU_REG_ID)); /* Get the optional framebuffer memory resource */ ret = of_reserved_mem_device_init(drm->dev); if (ret && ret != -ENODEV) return ret; if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32))) return -ENODEV; if (arc_pgu_setup_crtc(drm) < 0) return -ENODEV; /* find the encoder node and initialize it */ encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); if (encoder_node) { ret = arcpgu_drm_hdmi_init(drm, encoder_node); of_node_put(encoder_node); if (ret < 0) return ret; } else { ret = arcpgu_drm_sim_init(drm, NULL); if (ret < 0) return ret; } drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - arcpgu->fbdev = drm_fbdev_cma_init(drm, 16, drm->mode_config.num_crtc, drm->mode_config.num_connector); + arcpgu->fbdev = drm_fbdev_cma_init(drm, 16, drm->mode_config.num_connector); if (IS_ERR(arcpgu->fbdev)) { ret = PTR_ERR(arcpgu->fbdev); arcpgu->fbdev = NULL; return -ENODEV; } platform_set_drvdata(pdev, arcpgu); return 0; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8961
--- initial +++ final @@ -1,31 +1,31 @@ int armada_fbdev_init(struct drm_device *dev) { struct armada_private *priv = dev->dev_private; struct drm_fb_helper *fbh; int ret; fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL); if (!fbh) return -ENOMEM; priv->fbdev = fbh; drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs); - ret = drm_fb_helper_init(dev, fbh, 1, 1); + ret = drm_fb_helper_init(dev, fbh, 1); if (ret) { DRM_ERROR("failed to initialize drm fb helper\n"); goto err_fb_helper; } ret = drm_fb_helper_single_add_all_connectors(fbh); if (ret) { DRM_ERROR("failed to add fb connectors\n"); goto err_fb_setup; } ret = drm_fb_helper_initial_config(fbh, 32); if (ret) { DRM_ERROR("failed to set initial config\n"); goto err_fb_setup; } return 0; err_fb_setup: drm_fb_helper_release_fbi(fbh); drm_fb_helper_fini(fbh); err_fb_helper: priv->fbdev = NULL; return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8962
--- initial +++ final @@ -1,24 +1,24 @@ int ast_fbdev_init(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; struct ast_fbdev *afbdev; int ret; afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL); if (!afbdev) return -ENOMEM; ast->fbdev = afbdev; spin_lock_init(&afbdev->dirty_lock); drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs); - ret = drm_fb_helper_init(dev, &afbdev->helper, 1, 1); + ret = drm_fb_helper_init(dev, &afbdev->helper, 1); if (ret) goto free; ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper); if (ret) goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); ret = drm_fb_helper_initial_config(&afbdev->helper, 32); if (ret) goto fini; return 0; fini: drm_fb_helper_fini(&afbdev->helper); free: kfree(afbdev); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8963
--- initial +++ final @@ -1,58 +1,58 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) { struct platform_device *pdev = to_platform_device(dev->dev); const struct of_device_id *match; struct atmel_hlcdc_dc *dc; int ret; match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node); if (!match) { dev_err(&pdev->dev, "invalid compatible string\n"); return -ENODEV; } if (!match->data) { dev_err(&pdev->dev, "invalid hlcdc description\n"); return -EINVAL; } dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL); if (!dc) return -ENOMEM; dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); if (!dc->wq) return -ENOMEM; init_waitqueue_head(&dc->commit.wait); dc->desc = match->data; dc->hlcdc = dev_get_drvdata(dev->dev->parent); dev->dev_private = dc; ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { dev_err(dev->dev, "failed to enable periph_clk\n"); goto err_destroy_wq; } pm_runtime_enable(dev->dev); ret = drm_vblank_init(dev, 1); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } drm_mode_config_reset(dev); pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { dev_err(dev->dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } platform_set_drvdata(pdev, dev); - dc->fbdev = drm_fbdev_cma_init(dev, 24, dev->mode_config.num_crtc, dev->mode_config.num_connector); + dc->fbdev = drm_fbdev_cma_init(dev, 24, dev->mode_config.num_connector); if (IS_ERR(dc->fbdev)) dc->fbdev = NULL; drm_kms_helper_poll_init(dev); return 0; err_periph_clk_disable: pm_runtime_disable(dev->dev); clk_disable_unprepare(dc->hlcdc->periph_clk); err_destroy_wq: destroy_workqueue(dc->wq); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8964
--- initial +++ final @@ -1,16 +1,16 @@ int bochs_fbdev_init(struct bochs_device *bochs) { int ret; drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper, &bochs_fb_helper_funcs); - ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1, 1); + ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1); if (ret) return ret; ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper); if (ret) goto fini; drm_helper_disable_unused_functions(bochs->dev); ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32); if (ret) goto fini; bochs->fb.initialized = true; return 0; fini: drm_fb_helper_fini(&bochs->fb.helper); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8965
--- initial +++ final @@ -1,18 +1,18 @@ int cirrus_fbdev_init(struct cirrus_device *cdev) { struct cirrus_fbdev *gfbdev; int ret; int bpp_sel = 24; /*bpp_sel = 8;*/ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL); if (!gfbdev) return -ENOMEM; cdev->mode_info.gfbdev = gfbdev; spin_lock_init(&gfbdev->dirty_lock); drm_fb_helper_prepare(cdev->dev, &gfbdev->helper, &cirrus_fb_helper_funcs); - ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, cdev->num_crtc, CIRRUSFB_CONN_LIMIT); + ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, CIRRUSFB_CONN_LIMIT); if (ret) return ret; ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper); if (ret) return ret; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(cdev->dev); return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel); }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8966
--- initial +++ final @@ -1,3 +1,3 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count) { - return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc, max_conn_count, &drm_fb_cma_funcs); + return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, max_conn_count, &drm_fb_cma_funcs); }<sep>@r@ expression A,B,D,E; identifier C; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8967
--- initial +++ final @@ -1,34 +1,34 @@ struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs) { struct drm_fbdev_cma *fbdev_cma; struct drm_fb_helper *helper; int ret; fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL); if (!fbdev_cma) { dev_err(dev->dev, "Failed to allocate drm fbdev.\n"); return ERR_PTR(-ENOMEM); } fbdev_cma->fb_funcs = funcs; helper = &fbdev_cma->fb_helper; drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); - ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count); + ret = drm_fb_helper_init(dev, helper, max_conn_count); if (ret < 0) { dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); goto err_free; } ret = drm_fb_helper_single_add_all_connectors(helper); if (ret < 0) { dev_err(dev->dev, "Failed to add connectors.\n"); goto err_drm_fb_helper_fini; } ret = drm_fb_helper_initial_config(helper, preferred_bpp); if (ret < 0) { dev_err(dev->dev, "Failed to set initial hw configuration.\n"); goto err_drm_fb_helper_fini; } return fbdev_cma; err_drm_fb_helper_fini: drm_fb_helper_fini(helper); err_free: kfree(fbdev_cma); return ERR_PTR(ret); }<sep>@r@ expression A,B,D,E; identifier C; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8968
--- initial +++ final @@ -1,38 +1,36 @@ int exynos_drm_fbdev_init(struct drm_device *dev) { struct exynos_drm_fbdev *fbdev; struct exynos_drm_private *private = dev->dev_private; struct drm_fb_helper *helper; - unsigned int num_crtc; int ret; if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) return 0; if (!exynos_drm_fbdev_is_anything_connected(dev)) return 0; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) return -ENOMEM; private ->fb_helper = helper = &fbdev->drm_fb_helper; drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs); - num_crtc = dev->mode_config.num_crtc; - ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR); + ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR); if (ret < 0) { DRM_ERROR("failed to initialize drm fb helper.\n"); goto err_init; } ret = drm_fb_helper_single_add_all_connectors(helper); if (ret < 0) { DRM_ERROR("failed to register drm_fb_helper_connector.\n"); goto err_setup; } ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); if (ret < 0) { DRM_ERROR("failed to set up hw configuration.\n"); goto err_setup; } return 0; err_setup: drm_fb_helper_fini(helper); err_init: private ->fb_helper = NULL; kfree(fbdev); return ret; }<sep>@r@ expression A,B,D,E; identifier C; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) @@ identifier r.C; type T; expression V; @@ - T C; <... when != C - C = V; ...> <|end_of_text|>
8969
--- initial +++ final @@ -1,23 +1,23 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_connectors) { struct drm_device *drm = fbdev->base.dev; int err; - err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); + err = drm_fb_helper_init(drm, &fbdev->base, max_connectors); if (err < 0) { dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n", err); return err; } err = drm_fb_helper_single_add_all_connectors(&fbdev->base); if (err < 0) { dev_err(drm->dev, "failed to add connectors: %d\n", err); goto fini; } err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); if (err < 0) { dev_err(drm->dev, "failed to set initial configuration: %d\n", err); goto fini; } return 0; fini: drm_fb_helper_fini(&fbdev->base); return err; }<sep>@r@ expression A,B,D,E; identifier C; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8970
--- initial +++ final @@ -1,26 +1,26 @@ int psb_fbdev_init(struct drm_device *dev) { struct psb_fbdev *fbdev; struct drm_psb_private *dev_priv = dev->dev_private; int ret; fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL); if (!fbdev) { dev_err(dev->dev, "no memory\n"); return -ENOMEM; } dev_priv->fbdev = fbdev; drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs); - ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs, INTELFB_CONN_LIMIT); + ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper, INTELFB_CONN_LIMIT); if (ret) goto free; ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper); if (ret) goto fini; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32); if (ret) goto fini; return 0; fini: drm_fb_helper_fini(&fbdev->psb_fb_helper); free: kfree(fbdev); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8971
--- initial +++ final @@ -1,36 +1,36 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); return ret; } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); goto done; } ret = fsl_dcu_drm_irq_init(dev); if (ret < 0) goto done; dev->irq_enabled = true; if (legacyfb_depth != 16 && legacyfb_depth != 24 && legacyfb_depth != 32) { dev_warn(dev->dev, "Invalid legacyfb_depth. Defaulting to 24bpp\n"); legacyfb_depth = 24; } - fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1); + fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1); if (IS_ERR(fsl_dev->fbdev)) { ret = PTR_ERR(fsl_dev->fbdev); fsl_dev->fbdev = NULL; goto done; } return 0; done: drm_kms_helper_poll_fini(dev); if (fsl_dev->fbdev) drm_fbdev_cma_fini(fsl_dev->fbdev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); drm_irq_uninstall(dev); dev->dev_private = NULL; return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8972
--- initial +++ final @@ -1,58 +1,58 @@ static int hdlcd_drm_bind(struct device *dev) { struct drm_device *drm; struct hdlcd_drm_private *hdlcd; int ret; hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL); if (!hdlcd) return -ENOMEM; drm = drm_dev_alloc(&hdlcd_driver, dev); if (IS_ERR(drm)) return PTR_ERR(drm); drm->dev_private = hdlcd; dev_set_drvdata(dev, drm); hdlcd_setup_mode_config(drm); ret = hdlcd_load(drm, 0); if (ret) goto err_free; ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); goto err_unload; } ret = pm_runtime_set_active(dev); if (ret) goto err_pm_active; pm_runtime_enable(dev); ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); goto err_vblank; } drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, drm->mode_config.num_connector); + hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector); if (IS_ERR(hdlcd->fbdev)) { ret = PTR_ERR(hdlcd->fbdev); hdlcd->fbdev = NULL; goto err_fbdev; } ret = drm_dev_register(drm, 0); if (ret) goto err_register; return 0; err_register: if (hdlcd->fbdev) { drm_fbdev_cma_fini(hdlcd->fbdev); hdlcd->fbdev = NULL; } err_fbdev: drm_kms_helper_poll_fini(drm); drm_vblank_cleanup(drm); err_vblank: pm_runtime_disable(drm->dev); err_pm_active: component_unbind_all(dev, drm); err_unload: drm_irq_uninstall(drm); of_reserved_mem_device_release(drm->dev); err_free: drm_mode_config_cleanup(drm); dev_set_drvdata(dev, NULL); drm_dev_unref(drm); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8973
--- initial +++ final @@ -1,59 +1,59 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv) { int ret; struct fb_var_screeninfo *var; struct fb_fix_screeninfo *fix; struct hibmc_fbdev *hifbdev; hifbdev = devm_kzalloc(priv->dev->dev, sizeof(*hifbdev), GFP_KERNEL); if (!hifbdev) { DRM_ERROR("failed to allocate hibmc_fbdev\n"); return -ENOMEM; } priv->fbdev = hifbdev; drm_fb_helper_prepare(priv->dev, &hifbdev->helper, &hibmc_fbdev_helper_funcs); /* Now just one crtc and one channel */ - ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1, 1); + ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1); if (ret) { DRM_ERROR("failed to initialize fb helper: %d\n", ret); return ret; } ret = drm_fb_helper_single_add_all_connectors(&hifbdev->helper); if (ret) { DRM_ERROR("failed to add all connectors: %d\n", ret); goto fini; } ret = drm_fb_helper_initial_config(&hifbdev->helper, 16); if (ret) { DRM_ERROR("failed to setup initial conn config: %d\n", ret); goto fini; } var = &hifbdev->helper.fbdev->var; fix = &hifbdev->helper.fbdev->fix; DRM_DEBUG_DRIVER("Member of info->var is :\n" "xres=%d\n" "yres=%d\n" "xres_virtual=%d\n" "yres_virtual=%d\n" "xoffset=%d\n" "yoffset=%d\n" "bits_per_pixel=%d\n" "...\n", var->xres, var->yres, var->xres_virtual, var->yres_virtual, var->xoffset, var->yoffset, var->bits_per_pixel); DRM_DEBUG_DRIVER("Member of info->fix is :\n" "smem_start=%lx\n" "smem_len=%d\n" "type=%d\n" "type_aux=%d\n" "visual=%d\n" "xpanstep=%d\n" "ypanstep=%d\n" "ywrapstep=%d\n" "line_length=%d\n" "accel=%d\n" "capabilities=%d\n" "...\n", fix->smem_start, fix->smem_len, fix->type, fix->type_aux, fix->visual, fix->xpanstep, fix->ypanstep, fix->ywrapstep, fix->line_length, fix->accel, fix->capabilities); return 0; fini: drm_fb_helper_fini(&hifbdev->helper); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8974
--- initial +++ final @@ -1,78 +1,78 @@ static int imx_drm_bind(struct device *dev) { struct drm_device *drm; struct imx_drm_device *imxdrm; int ret; drm = drm_dev_alloc(&imx_drm_driver, dev); if (IS_ERR(drm)) return PTR_ERR(drm); imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL); if (!imxdrm) { ret = -ENOMEM; goto err_unref; } imxdrm->drm = drm; drm->dev_private = imxdrm; /* * enable drm irq mode. * - with irq_enabled = true, we can use the vblank feature. * * P.S. note that we wouldn't use drm irq handler but * just specific driver own one instead because * drm framework supports only one irq handler and * drivers can well take care of their interrupts */ drm->irq_enabled = true; /* * set max width and height as default value(4096x4096). * this value would be used to check framebuffer size limitation * at drm_mode_addfb(). */ drm->mode_config.min_width = 64; drm->mode_config.min_height = 64; drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &imx_drm_mode_config_funcs; drm->mode_config.helper_private = &imx_drm_mode_config_helpers; drm_mode_config_init(drm); ret = drm_vblank_init(drm, MAX_CRTC); if (ret) goto err_kms; dev_set_drvdata(dev, drm); /* Now try and bind all our sub-components */ ret = component_bind_all(dev, drm); if (ret) goto err_vblank; drm_mode_config_reset(drm); /* * All components are now initialised, so setup the fb helper. * The fb helper takes copies of key hardware information, so the * crtcs/connectors/encoders must not change after this point. */ #if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) if (legacyfb_depth != 16 && legacyfb_depth != 32) { dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n"); legacyfb_depth = 16; } - imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, drm->mode_config.num_crtc, MAX_CRTC); + imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC); if (IS_ERR(imxdrm->fbhelper)) { ret = PTR_ERR(imxdrm->fbhelper); imxdrm->fbhelper = NULL; goto err_unbind; } #endif drm_kms_helper_poll_init(drm); ret = drm_dev_register(drm, 0); if (ret) goto err_fbhelper; return 0; err_fbhelper: drm_kms_helper_poll_fini(drm); #if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) if (imxdrm->fbhelper) drm_fbdev_cma_fini(imxdrm->fbhelper); err_unbind: #endif component_unbind_all(drm->dev, drm); err_vblank: drm_vblank_cleanup(drm); err_kms: drm_mode_config_cleanup(drm); err_unref: drm_dev_unref(drm); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8975
--- initial +++ final @@ -1,19 +1,19 @@ int intel_fbdev_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_fbdev *ifbdev; int ret; if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0)) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); if (ifbdev == NULL) return -ENOMEM; drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); if (!intel_fbdev_init_bios(dev, ifbdev)) ifbdev->preferred_bpp = 32; - ret = drm_fb_helper_init(dev, &ifbdev->helper, INTEL_INFO(dev_priv)->num_pipes, 4); + ret = drm_fb_helper_init(dev, &ifbdev->helper, 4); if (ret) { kfree(ifbdev); return ret; } dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); drm_fb_helper_single_add_all_connectors(&ifbdev->helper); return 0; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8976
--- initial +++ final @@ -1,9 +1,9 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev) { struct kirin_drm_private *priv = dev->dev_private; if (priv->fbdev) { drm_fbdev_cma_hotplug_event(priv->fbdev); } else { - priv->fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc, dev->mode_config.num_connector); + priv->fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_connector); if (IS_ERR(priv->fbdev)) priv->fbdev = NULL; } }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8977
--- initial +++ final @@ -1,143 +1,143 @@ static int malidp_bind(struct device *dev) { struct resource *res; struct drm_device *drm; struct device_node *ep; struct malidp_drm *malidp; struct malidp_hw_device *hwdev; struct platform_device *pdev = to_platform_device(dev); struct of_device_id const *dev_id; /* number of lines for the R, G and B output */ u8 output_width[MAX_OUTPUT_CHANNELS]; int ret = 0, i; u32 version, out_depth = 0; malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL); if (!malidp) return -ENOMEM; hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL); if (!hwdev) return -ENOMEM; /* * copy the associated data from malidp_drm_of_match to avoid * having to keep a reference to the OF node after binding */ memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); malidp->dev = hwdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwdev->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hwdev->regs)) return PTR_ERR(hwdev->regs); hwdev->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(hwdev->pclk)) return PTR_ERR(hwdev->pclk); hwdev->aclk = devm_clk_get(dev, "aclk"); if (IS_ERR(hwdev->aclk)) return PTR_ERR(hwdev->aclk); hwdev->mclk = devm_clk_get(dev, "mclk"); if (IS_ERR(hwdev->mclk)) return PTR_ERR(hwdev->mclk); hwdev->pxlclk = devm_clk_get(dev, "pxlclk"); if (IS_ERR(hwdev->pxlclk)) return PTR_ERR(hwdev->pxlclk); /* Get the optional framebuffer memory resource */ ret = of_reserved_mem_device_init(dev); if (ret && ret != -ENODEV) return ret; drm = drm_dev_alloc(&malidp_driver, dev); if (IS_ERR(drm)) { ret = PTR_ERR(drm); goto alloc_fail; } /* Enable APB clock in order to get access to the registers */ clk_prepare_enable(hwdev->pclk); /* * Enable AXI clock and main clock so that prefetch can start once * the registers are set */ clk_prepare_enable(hwdev->aclk); clk_prepare_enable(hwdev->mclk); dev_id = of_match_device(malidp_drm_of_match, dev); if (!dev_id) { ret = -EINVAL; goto query_hw_fail; } if (!malidp_has_sufficient_address_space(res, dev_id)) { DRM_ERROR("Insufficient address space in device-tree.\n"); ret = -EINVAL; goto query_hw_fail; } if (!malidp_is_compatible_hw_id(hwdev, dev_id)) { ret = -EINVAL; goto query_hw_fail; } ret = hwdev->query_hw(hwdev); if (ret) { DRM_ERROR("Invalid HW configuration\n"); goto query_hw_fail; } version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID); DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, (version >> 12) & 0xf, (version >> 8) & 0xf); /* set the number of lines used for output of RGB data */ ret = of_property_read_u8_array(dev->of_node, "arm,malidp-output-port-lines", output_width, MAX_OUTPUT_CHANNELS); if (ret) goto query_hw_fail; for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) out_depth = (out_depth << 8) | (output_width[i] & 0xf); malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); drm->dev_private = malidp; dev_set_drvdata(dev, drm); atomic_set(&malidp->config_valid, 0); init_waitqueue_head(&malidp->wq); ret = malidp_init(drm); if (ret < 0) goto init_fail; /* Set the CRTC's port so that the encoder component can find it */ ep = of_graph_get_next_endpoint(dev->of_node, NULL); if (!ep) { ret = -EINVAL; goto port_fail; } malidp->crtc.port = of_get_next_parent(ep); ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); goto bind_fail; } ret = malidp_irq_init(pdev); if (ret < 0) goto irq_init_fail; drm->irq_enabled = true; ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); goto 
vblank_fail; } drm_mode_config_reset(drm); - malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, drm->mode_config.num_connector); + malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector); if (IS_ERR(malidp->fbdev)) { ret = PTR_ERR(malidp->fbdev); malidp->fbdev = NULL; goto fbdev_fail; } drm_kms_helper_poll_init(drm); ret = drm_dev_register(drm, 0); if (ret) goto register_fail; return 0; register_fail: if (malidp->fbdev) { drm_fbdev_cma_fini(malidp->fbdev); malidp->fbdev = NULL; } fbdev_fail: drm_vblank_cleanup(drm); vblank_fail: malidp_se_irq_fini(drm); malidp_de_irq_fini(drm); drm->irq_enabled = false; irq_init_fail: component_unbind_all(dev, drm); bind_fail: of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; port_fail: malidp_fini(drm); init_fail: drm->dev_private = NULL; dev_set_drvdata(dev, NULL); query_hw_fail: clk_disable_unprepare(hwdev->mclk); clk_disable_unprepare(hwdev->aclk); clk_disable_unprepare(hwdev->pclk); drm_dev_unref(drm); alloc_fail: of_reserved_mem_device_release(dev); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8978
--- initial +++ final @@ -1,78 +1,78 @@ static int meson_drv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct meson_drm *priv; struct drm_device *drm; struct resource *res; void __iomem *regs; int ret; /* Checks if an output connector is available */ if (!meson_vpu_has_available_connectors(dev)) { dev_err(dev, "No output connector available\n"); return -ENODEV; } drm = drm_dev_alloc(&meson_driver, dev); if (IS_ERR(drm)) return PTR_ERR(drm); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; goto free_drm; } drm->dev_private = priv; priv->drm = drm; priv->dev = dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); regs = devm_ioremap_resource(dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); priv->io_base = regs; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) return -EADDRNOTAVAIL; priv->hhi = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->hhi)) { dev_err(&pdev->dev, "Couldn't create the HHI regmap\n"); return PTR_ERR(priv->hhi); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) return -EADDRNOTAVAIL; priv->dmc = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->dmc)) { dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); return PTR_ERR(priv->dmc); } priv->vsync_irq = platform_get_irq(pdev, 0); drm_vblank_init(drm, 1); drm_mode_config_init(drm); /* Encoder Initialization */ ret = meson_venc_cvbs_create(priv); if (ret) goto free_drm; /* Hardware Initialization */ meson_venc_init(priv); meson_vpp_init(priv); meson_viu_init(priv); ret = meson_plane_create(priv); if (ret) goto free_drm; ret = meson_crtc_create(priv); if (ret) goto free_drm; ret = drm_irq_install(drm, priv->vsync_irq); if (ret) goto free_drm; drm_mode_config_reset(drm); drm->mode_config.max_width = 8192; drm->mode_config.max_height = 8192; drm->mode_config.funcs = &meson_mode_config_funcs; - priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, drm->mode_config.num_connector); + priv->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector); if (IS_ERR(priv->fbdev)) { ret = PTR_ERR(priv->fbdev); goto free_drm; } drm_kms_helper_poll_init(drm); platform_set_drvdata(pdev, priv); ret = drm_dev_register(drm, 0); if (ret) goto free_drm; return 0; free_drm: drm_dev_unref(drm); return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8979
--- initial +++ final @@ -1,26 +1,26 @@ int mgag200_fbdev_init(struct mga_device *mdev) { struct mga_fbdev *mfbdev; int ret; int bpp_sel = 32; /* prefer 16bpp on low end gpus with limited VRAM */ if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048 * 1024)) bpp_sel = 16; mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL); if (!mfbdev) return -ENOMEM; mdev->mfbdev = mfbdev; spin_lock_init(&mfbdev->dirty_lock); drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs); - ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, MGAG200FB_CONN_LIMIT); + ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, MGAG200FB_CONN_LIMIT); if (ret) goto err_fb_helper; ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); if (ret) goto err_fb_setup; /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(mdev->dev); ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); if (ret) goto err_fb_setup; return 0; err_fb_setup: drm_fb_helper_fini(&mfbdev->helper); err_fb_helper: mdev->mfbdev = NULL; return ret; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8980
--- initial +++ final @@ -1,26 +1,26 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct msm_fbdev *fbdev = NULL; struct drm_fb_helper *helper; int ret; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) goto fail; helper = &fbdev->base; drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs); - ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, priv->num_connectors); + ret = drm_fb_helper_init(dev, helper, priv->num_connectors); if (ret) { dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret); goto fail; } ret = drm_fb_helper_single_add_all_connectors(helper); if (ret) goto fini; ret = drm_fb_helper_initial_config(helper, 32); if (ret) goto fini; priv->fbdev = helper; return helper; fini: drm_fb_helper_fini(helper); fail: kfree(fbdev); return NULL; }<sep>@@ expression A,B,C,D,E; @@ ( - drm_fb_helper_init(A,B,C,D) + drm_fb_helper_init(A,B,D) | - drm_fbdev_cma_init_with_funcs(A,B,C,D,E) + drm_fbdev_cma_init_with_funcs(A,B,D,E) | - drm_fbdev_cma_init(A,B,C,D) + drm_fbdev_cma_init(A,B,D) ) <|end_of_text|>
8981